diff --git a/data/annotations/sklearn__annotations.json b/data/annotations/sklearn__annotations.json index 62a49212f..a0d1e2e84 100644 --- a/data/annotations/sklearn__annotations.json +++ b/data/annotations/sklearn__annotations.json @@ -60,6 +60,46 @@ "defaultType": "string", "defaultValue": "uniform" }, + "sklearn/sklearn.cluster._affinity_propagation/AffinityPropagation/__init__/max_iter": { + "target": "sklearn/sklearn.cluster._affinity_propagation/AffinityPropagation/__init__/max_iter", + "defaultType": "number", + "defaultValue": 200.0 + }, + "sklearn/sklearn.cluster._affinity_propagation/AffinityPropagation/__init__/convergence_iter": { + "target": "sklearn/sklearn.cluster._affinity_propagation/AffinityPropagation/__init__/convergence_iter", + "defaultType": "number", + "defaultValue": 15.0 + }, + "sklearn/sklearn.cluster._affinity_propagation/AffinityPropagation/__init__/copy": { + "target": "sklearn/sklearn.cluster._affinity_propagation/AffinityPropagation/__init__/copy", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.cluster._affinity_propagation/AffinityPropagation/__init__/preference": { + "target": "sklearn/sklearn.cluster._affinity_propagation/AffinityPropagation/__init__/preference", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.cluster._affinity_propagation/AffinityPropagation/__init__/affinity": { + "target": "sklearn/sklearn.cluster._affinity_propagation/AffinityPropagation/__init__/affinity", + "defaultType": "string", + "defaultValue": "euclidean" + }, + "sklearn/sklearn.cluster._affinity_propagation/AffinityPropagation/__init__/verbose": { + "target": "sklearn/sklearn.cluster._affinity_propagation/AffinityPropagation/__init__/verbose", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.cluster._affinity_propagation/AffinityPropagation/__init__/random_state": { + "target": "sklearn/sklearn.cluster._affinity_propagation/AffinityPropagation/__init__/random_state", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.cluster._affinity_propagation/AffinityPropagation/fit/y": { + "target": "sklearn/sklearn.cluster._affinity_propagation/AffinityPropagation/fit/y", + "defaultType": "none", + "defaultValue": null + }, "sklearn/sklearn.cluster._affinity_propagation/affinity_propagation/preference": { "target": "sklearn/sklearn.cluster._affinity_propagation/affinity_propagation/preference", "defaultType": "none", @@ -100,6 +140,111 @@ "defaultType": "none", "defaultValue": null }, + "sklearn/sklearn.cluster._agglomerative/AgglomerativeClustering/__init__/memory": { + "target": "sklearn/sklearn.cluster._agglomerative/AgglomerativeClustering/__init__/memory", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.cluster._agglomerative/AgglomerativeClustering/__init__/connectivity": { + "target": "sklearn/sklearn.cluster._agglomerative/AgglomerativeClustering/__init__/connectivity", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.cluster._agglomerative/AgglomerativeClustering/__init__/compute_full_tree": { + "target": "sklearn/sklearn.cluster._agglomerative/AgglomerativeClustering/__init__/compute_full_tree", + "defaultType": "string", + "defaultValue": "auto" + }, + "sklearn/sklearn.cluster._agglomerative/AgglomerativeClustering/__init__/compute_distances": { + "target": "sklearn/sklearn.cluster._agglomerative/AgglomerativeClustering/__init__/compute_distances", + "defaultType": "boolean", + "defaultValue": false + }, + 
"sklearn/sklearn.cluster._agglomerative/AgglomerativeClustering/fit_predict/y": { + "target": "sklearn/sklearn.cluster._agglomerative/AgglomerativeClustering/fit_predict/y", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.cluster._agglomerative/FeatureAgglomeration/__init__/affinity": { + "target": "sklearn/sklearn.cluster._agglomerative/FeatureAgglomeration/__init__/affinity", + "defaultType": "string", + "defaultValue": "euclidean" + }, + "sklearn/sklearn.cluster._agglomerative/FeatureAgglomeration/__init__/memory": { + "target": "sklearn/sklearn.cluster._agglomerative/FeatureAgglomeration/__init__/memory", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.cluster._agglomerative/FeatureAgglomeration/__init__/compute_full_tree": { + "target": "sklearn/sklearn.cluster._agglomerative/FeatureAgglomeration/__init__/compute_full_tree", + "defaultType": "string", + "defaultValue": "auto" + }, + "sklearn/sklearn.cluster._agglomerative/FeatureAgglomeration/__init__/linkage": { + "target": "sklearn/sklearn.cluster._agglomerative/FeatureAgglomeration/__init__/linkage", + "defaultType": "string", + "defaultValue": "ward" + }, + "sklearn/sklearn.cluster._agglomerative/FeatureAgglomeration/__init__/distance_threshold": { + "target": "sklearn/sklearn.cluster._agglomerative/FeatureAgglomeration/__init__/distance_threshold", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.cluster._agglomerative/FeatureAgglomeration/__init__/compute_distances": { + "target": "sklearn/sklearn.cluster._agglomerative/FeatureAgglomeration/__init__/compute_distances", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.cluster._agglomerative/FeatureAgglomeration/fit/y": { + "target": "sklearn/sklearn.cluster._agglomerative/FeatureAgglomeration/fit/y", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.cluster._birch/Birch/__init__/compute_labels": { + "target": "sklearn/sklearn.cluster._birch/Birch/__init__/compute_labels", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.cluster._birch/Birch/__init__/copy": { + "target": "sklearn/sklearn.cluster._birch/Birch/__init__/copy", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.cluster._birch/Birch/fit/y": { + "target": "sklearn/sklearn.cluster._birch/Birch/fit/y", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.cluster._dbscan/DBSCAN/__init__/metric_params": { + "target": "sklearn/sklearn.cluster._dbscan/DBSCAN/__init__/metric_params", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.cluster._dbscan/DBSCAN/__init__/p": { + "target": "sklearn/sklearn.cluster._dbscan/DBSCAN/__init__/p", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.cluster._dbscan/DBSCAN/fit/y": { + "target": "sklearn/sklearn.cluster._dbscan/DBSCAN/fit/y", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.cluster._dbscan/DBSCAN/fit/sample_weight": { + "target": "sklearn/sklearn.cluster._dbscan/DBSCAN/fit/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.cluster._dbscan/DBSCAN/fit_predict/y": { + "target": "sklearn/sklearn.cluster._dbscan/DBSCAN/fit_predict/y", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.cluster._dbscan/DBSCAN/fit_predict/sample_weight": { + "target": "sklearn/sklearn.cluster._dbscan/DBSCAN/fit_predict/sample_weight", + "defaultType": "none", + "defaultValue": null + }, 
"sklearn/sklearn.cluster._dbscan/dbscan/eps": { "target": "sklearn/sklearn.cluster._dbscan/dbscan/eps", "defaultType": "number", @@ -145,6 +290,81 @@ "defaultType": "none", "defaultValue": null }, + "sklearn/sklearn.cluster._kmeans/KMeans/__init__/copy_x": { + "target": "sklearn/sklearn.cluster._kmeans/KMeans/__init__/copy_x", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.cluster._kmeans/KMeans/fit/sample_weight": { + "target": "sklearn/sklearn.cluster._kmeans/KMeans/fit/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.cluster._kmeans/MiniBatchKMeans/__init__/init": { + "target": "sklearn/sklearn.cluster._kmeans/MiniBatchKMeans/__init__/init", + "defaultType": "string", + "defaultValue": "k-means++" + }, + "sklearn/sklearn.cluster._kmeans/MiniBatchKMeans/__init__/compute_labels": { + "target": "sklearn/sklearn.cluster._kmeans/MiniBatchKMeans/__init__/compute_labels", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.cluster._kmeans/MiniBatchKMeans/__init__/max_no_improvement": { + "target": "sklearn/sklearn.cluster._kmeans/MiniBatchKMeans/__init__/max_no_improvement", + "defaultType": "number", + "defaultValue": 10.0 + }, + "sklearn/sklearn.cluster._kmeans/MiniBatchKMeans/fit/y": { + "target": "sklearn/sklearn.cluster._kmeans/MiniBatchKMeans/fit/y", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.cluster._kmeans/MiniBatchKMeans/fit/sample_weight": { + "target": "sklearn/sklearn.cluster._kmeans/MiniBatchKMeans/fit/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.cluster._kmeans/MiniBatchKMeans/partial_fit/y": { + "target": "sklearn/sklearn.cluster._kmeans/MiniBatchKMeans/partial_fit/y", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.cluster._kmeans/MiniBatchKMeans/partial_fit/sample_weight": { + "target": "sklearn/sklearn.cluster._kmeans/MiniBatchKMeans/partial_fit/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.cluster._kmeans/_BaseKMeans/fit_predict/y": { + "target": "sklearn/sklearn.cluster._kmeans/_BaseKMeans/fit_predict/y", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.cluster._kmeans/_BaseKMeans/fit_predict/sample_weight": { + "target": "sklearn/sklearn.cluster._kmeans/_BaseKMeans/fit_predict/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.cluster._kmeans/_BaseKMeans/fit_transform/sample_weight": { + "target": "sklearn/sklearn.cluster._kmeans/_BaseKMeans/fit_transform/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.cluster._kmeans/_BaseKMeans/predict/sample_weight": { + "target": "sklearn/sklearn.cluster._kmeans/_BaseKMeans/predict/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.cluster._kmeans/_BaseKMeans/score/y": { + "target": "sklearn/sklearn.cluster._kmeans/_BaseKMeans/score/y", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.cluster._kmeans/_BaseKMeans/score/sample_weight": { + "target": "sklearn/sklearn.cluster._kmeans/_BaseKMeans/score/sample_weight", + "defaultType": "none", + "defaultValue": null + }, "sklearn/sklearn.cluster._kmeans/k_means/sample_weight": { "target": "sklearn/sklearn.cluster._kmeans/k_means/sample_weight", "defaultType": "none", @@ -157,8 +377,8 @@ }, "sklearn/sklearn.cluster._kmeans/k_means/n_init": { "target": "sklearn/sklearn.cluster._kmeans/k_means/n_init", - 
"defaultType": "string", - "defaultValue": "warn" + "defaultType": "number", + "defaultValue": 10.0 }, "sklearn/sklearn.cluster._kmeans/k_means/verbose": { "target": "sklearn/sklearn.cluster._kmeans/k_means/verbose", @@ -185,6 +405,31 @@ "defaultType": "boolean", "defaultValue": false }, + "sklearn/sklearn.cluster._mean_shift/MeanShift/__init__/seeds": { + "target": "sklearn/sklearn.cluster._mean_shift/MeanShift/__init__/seeds", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.cluster._mean_shift/MeanShift/__init__/cluster_all": { + "target": "sklearn/sklearn.cluster._mean_shift/MeanShift/__init__/cluster_all", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.cluster._mean_shift/MeanShift/__init__/n_jobs": { + "target": "sklearn/sklearn.cluster._mean_shift/MeanShift/__init__/n_jobs", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.cluster._mean_shift/MeanShift/__init__/max_iter": { + "target": "sklearn/sklearn.cluster._mean_shift/MeanShift/__init__/max_iter", + "defaultType": "number", + "defaultValue": 300.0 + }, + "sklearn/sklearn.cluster._mean_shift/MeanShift/fit/y": { + "target": "sklearn/sklearn.cluster._mean_shift/MeanShift/fit/y", + "defaultType": "none", + "defaultValue": null + }, "sklearn/sklearn.cluster._mean_shift/estimate_bandwidth/random_state": { "target": "sklearn/sklearn.cluster._mean_shift/estimate_bandwidth/random_state", "defaultType": "number", @@ -195,104 +440,454 @@ "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.compose._column_transformer/make_column_transformer/sparse_threshold": { - "target": "sklearn/sklearn.compose._column_transformer/make_column_transformer/sparse_threshold", + "sklearn/sklearn.cluster._optics/OPTICS/__init__/p": { + "target": "sklearn/sklearn.cluster._optics/OPTICS/__init__/p", "defaultType": "number", - "defaultValue": 0.3 + "defaultValue": 2.0 }, - "sklearn/sklearn.compose._column_transformer/make_column_transformer/n_jobs": { - "target": "sklearn/sklearn.compose._column_transformer/make_column_transformer/n_jobs", + "sklearn/sklearn.cluster._optics/OPTICS/__init__/metric_params": { + "target": "sklearn/sklearn.cluster._optics/OPTICS/__init__/metric_params", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.compose._column_transformer/make_column_transformer/verbose": { - "target": "sklearn/sklearn.compose._column_transformer/make_column_transformer/verbose", - "defaultType": "boolean", - "defaultValue": false + "sklearn/sklearn.cluster._optics/OPTICS/__init__/cluster_method": { + "target": "sklearn/sklearn.cluster._optics/OPTICS/__init__/cluster_method", + "defaultType": "string", + "defaultValue": "xi" }, - "sklearn/sklearn.compose._column_transformer/make_column_transformer/verbose_feature_names_out": { - "target": "sklearn/sklearn.compose._column_transformer/make_column_transformer/verbose_feature_names_out", + "sklearn/sklearn.cluster._optics/OPTICS/__init__/eps": { + "target": "sklearn/sklearn.cluster._optics/OPTICS/__init__/eps", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.cluster._optics/OPTICS/__init__/predecessor_correction": { + "target": "sklearn/sklearn.cluster._optics/OPTICS/__init__/predecessor_correction", "defaultType": "boolean", "defaultValue": true }, - "sklearn/sklearn.covariance._shrunk_covariance/ledoit_wolf/assume_centered": { - "target": "sklearn/sklearn.covariance._shrunk_covariance/ledoit_wolf/assume_centered", - "defaultType": "boolean", - "defaultValue": false + 
"sklearn/sklearn.cluster._optics/OPTICS/__init__/algorithm": { + "target": "sklearn/sklearn.cluster._optics/OPTICS/__init__/algorithm", + "defaultType": "string", + "defaultValue": "auto" }, - "sklearn/sklearn.covariance._shrunk_covariance/ledoit_wolf/block_size": { - "target": "sklearn/sklearn.covariance._shrunk_covariance/ledoit_wolf/block_size", + "sklearn/sklearn.cluster._optics/OPTICS/__init__/leaf_size": { + "target": "sklearn/sklearn.cluster._optics/OPTICS/__init__/leaf_size", "defaultType": "number", - "defaultValue": 1000.0 + "defaultValue": 30.0 }, - "sklearn/sklearn.datasets._base/load_breast_cancer/as_frame": { - "target": "sklearn/sklearn.datasets._base/load_breast_cancer/as_frame", - "defaultType": "boolean", - "defaultValue": false + "sklearn/sklearn.cluster._optics/OPTICS/__init__/memory": { + "target": "sklearn/sklearn.cluster._optics/OPTICS/__init__/memory", + "defaultType": "none", + "defaultValue": null }, - "sklearn/sklearn.datasets._base/load_diabetes/return_X_y": { - "target": "sklearn/sklearn.datasets._base/load_diabetes/return_X_y", - "defaultType": "boolean", - "defaultValue": false + "sklearn/sklearn.cluster._optics/OPTICS/__init__/n_jobs": { + "target": "sklearn/sklearn.cluster._optics/OPTICS/__init__/n_jobs", + "defaultType": "none", + "defaultValue": null }, - "sklearn/sklearn.datasets._base/load_diabetes/as_frame": { - "target": "sklearn/sklearn.datasets._base/load_diabetes/as_frame", - "defaultType": "boolean", - "defaultValue": false + "sklearn/sklearn.cluster._optics/OPTICS/fit/y": { + "target": "sklearn/sklearn.cluster._optics/OPTICS/fit/y", + "defaultType": "none", + "defaultValue": null }, - "sklearn/sklearn.datasets._base/load_diabetes/scaled": { - "target": "sklearn/sklearn.datasets._base/load_diabetes/scaled", - "defaultType": "boolean", - "defaultValue": true + "sklearn/sklearn.cluster._spectral/SpectralClustering/__init__/n_components": { + "target": "sklearn/sklearn.cluster._spectral/SpectralClustering/__init__/n_components", + "defaultType": "none", + "defaultValue": null }, - "sklearn/sklearn.datasets._base/load_digits/n_class": { - "target": "sklearn/sklearn.datasets._base/load_digits/n_class", + "sklearn/sklearn.cluster._spectral/SpectralClustering/__init__/n_init": { + "target": "sklearn/sklearn.cluster._spectral/SpectralClustering/__init__/n_init", "defaultType": "number", "defaultValue": 10.0 }, - "sklearn/sklearn.datasets._base/load_digits/as_frame": { - "target": "sklearn/sklearn.datasets._base/load_digits/as_frame", + "sklearn/sklearn.cluster._spectral/SpectralClustering/__init__/gamma": { + "target": "sklearn/sklearn.cluster._spectral/SpectralClustering/__init__/gamma", + "defaultType": "number", + "defaultValue": 1.0 + }, + "sklearn/sklearn.cluster._spectral/SpectralClustering/__init__/n_neighbors": { + "target": "sklearn/sklearn.cluster._spectral/SpectralClustering/__init__/n_neighbors", + "defaultType": "number", + "defaultValue": 10.0 + }, + "sklearn/sklearn.cluster._spectral/SpectralClustering/__init__/eigen_tol": { + "target": "sklearn/sklearn.cluster._spectral/SpectralClustering/__init__/eigen_tol", + "defaultType": "number", + "defaultValue": 0.0 + }, + "sklearn/sklearn.cluster._spectral/SpectralClustering/__init__/degree": { + "target": "sklearn/sklearn.cluster._spectral/SpectralClustering/__init__/degree", + "defaultType": "number", + "defaultValue": 3.0 + }, + "sklearn/sklearn.cluster._spectral/SpectralClustering/__init__/coef0": { + "target": "sklearn/sklearn.cluster._spectral/SpectralClustering/__init__/coef0", + 
"defaultType": "number", + "defaultValue": 1.0 + }, + "sklearn/sklearn.cluster._spectral/SpectralClustering/__init__/kernel_params": { + "target": "sklearn/sklearn.cluster._spectral/SpectralClustering/__init__/kernel_params", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.cluster._spectral/SpectralClustering/__init__/n_jobs": { + "target": "sklearn/sklearn.cluster._spectral/SpectralClustering/__init__/n_jobs", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.cluster._spectral/SpectralClustering/__init__/verbose": { + "target": "sklearn/sklearn.cluster._spectral/SpectralClustering/__init__/verbose", "defaultType": "boolean", "defaultValue": false }, - "sklearn/sklearn.datasets._base/load_files/description": { - "target": "sklearn/sklearn.datasets._base/load_files/description", + "sklearn/sklearn.cluster._spectral/SpectralClustering/fit/y": { + "target": "sklearn/sklearn.cluster._spectral/SpectralClustering/fit/y", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.datasets._base/load_files/categories": { - "target": "sklearn/sklearn.datasets._base/load_files/categories", + "sklearn/sklearn.cluster._spectral/SpectralClustering/fit_predict/y": { + "target": "sklearn/sklearn.cluster._spectral/SpectralClustering/fit_predict/y", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.datasets._base/load_files/load_content": { - "target": "sklearn/sklearn.datasets._base/load_files/load_content", + "sklearn/sklearn.compose._column_transformer/ColumnTransformer/__init__/verbose_feature_names_out": { + "target": "sklearn/sklearn.compose._column_transformer/ColumnTransformer/__init__/verbose_feature_names_out", "defaultType": "boolean", "defaultValue": true }, - "sklearn/sklearn.datasets._base/load_files/shuffle": { - "target": "sklearn/sklearn.datasets._base/load_files/shuffle", + "sklearn/sklearn.compose._column_transformer/ColumnTransformer/get_params/deep": { + "target": "sklearn/sklearn.compose._column_transformer/ColumnTransformer/get_params/deep", "defaultType": "boolean", "defaultValue": true }, - "sklearn/sklearn.datasets._base/load_files/decode_error": { - "target": "sklearn/sklearn.datasets._base/load_files/decode_error", - "defaultType": "string", - "defaultValue": "strict" - }, - "sklearn/sklearn.datasets._base/load_files/random_state": { - "target": "sklearn/sklearn.datasets._base/load_files/random_state", - "defaultType": "number", - "defaultValue": 0.0 - }, - "sklearn/sklearn.datasets._base/load_files/allowed_extensions": { - "target": "sklearn/sklearn.datasets._base/load_files/allowed_extensions", + "sklearn/sklearn.compose._column_transformer/make_column_selector/__init__/pattern": { + "target": "sklearn/sklearn.compose._column_transformer/make_column_selector/__init__/pattern", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.datasets._base/load_iris/as_frame": { - "target": "sklearn/sklearn.datasets._base/load_iris/as_frame", - "defaultType": "boolean", + "sklearn/sklearn.compose._column_transformer/make_column_transformer/sparse_threshold": { + "target": "sklearn/sklearn.compose._column_transformer/make_column_transformer/sparse_threshold", + "defaultType": "number", + "defaultValue": 0.3 + }, + "sklearn/sklearn.compose._column_transformer/make_column_transformer/n_jobs": { + "target": "sklearn/sklearn.compose._column_transformer/make_column_transformer/n_jobs", + "defaultType": "none", + "defaultValue": null + }, + 
"sklearn/sklearn.compose._column_transformer/make_column_transformer/verbose": { + "target": "sklearn/sklearn.compose._column_transformer/make_column_transformer/verbose", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.compose._column_transformer/make_column_transformer/verbose_feature_names_out": { + "target": "sklearn/sklearn.compose._column_transformer/make_column_transformer/verbose_feature_names_out", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.covariance._elliptic_envelope/EllipticEnvelope/__init__/store_precision": { + "target": "sklearn/sklearn.covariance._elliptic_envelope/EllipticEnvelope/__init__/store_precision", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.covariance._elliptic_envelope/EllipticEnvelope/__init__/assume_centered": { + "target": "sklearn/sklearn.covariance._elliptic_envelope/EllipticEnvelope/__init__/assume_centered", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.covariance._elliptic_envelope/EllipticEnvelope/__init__/support_fraction": { + "target": "sklearn/sklearn.covariance._elliptic_envelope/EllipticEnvelope/__init__/support_fraction", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.covariance._elliptic_envelope/EllipticEnvelope/fit/y": { + "target": "sklearn/sklearn.covariance._elliptic_envelope/EllipticEnvelope/fit/y", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.covariance._empirical_covariance/EmpiricalCovariance/fit/y": { + "target": "sklearn/sklearn.covariance._empirical_covariance/EmpiricalCovariance/fit/y", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.covariance._graph_lasso/GraphicalLasso/__init__/mode": { + "target": "sklearn/sklearn.covariance._graph_lasso/GraphicalLasso/__init__/mode", + "defaultType": "string", + "defaultValue": "cd" + }, + "sklearn/sklearn.covariance._graph_lasso/GraphicalLasso/__init__/tol": { + "target": "sklearn/sklearn.covariance._graph_lasso/GraphicalLasso/__init__/tol", + "defaultType": "number", + "defaultValue": 0.0001 + }, + "sklearn/sklearn.covariance._graph_lasso/GraphicalLasso/__init__/enet_tol": { + "target": "sklearn/sklearn.covariance._graph_lasso/GraphicalLasso/__init__/enet_tol", + "defaultType": "number", + "defaultValue": 0.0001 + }, + "sklearn/sklearn.covariance._graph_lasso/GraphicalLasso/__init__/verbose": { + "target": "sklearn/sklearn.covariance._graph_lasso/GraphicalLasso/__init__/verbose", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.covariance._graph_lasso/GraphicalLasso/__init__/assume_centered": { + "target": "sklearn/sklearn.covariance._graph_lasso/GraphicalLasso/__init__/assume_centered", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.covariance._graph_lasso/GraphicalLasso/fit/y": { + "target": "sklearn/sklearn.covariance._graph_lasso/GraphicalLasso/fit/y", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.covariance._graph_lasso/GraphicalLassoCV/__init__/alphas": { + "target": "sklearn/sklearn.covariance._graph_lasso/GraphicalLassoCV/__init__/alphas", + "defaultType": "number", + "defaultValue": 4.0 + }, + "sklearn/sklearn.covariance._graph_lasso/GraphicalLassoCV/__init__/n_refinements": { + "target": "sklearn/sklearn.covariance._graph_lasso/GraphicalLassoCV/__init__/n_refinements", + "defaultType": "number", + "defaultValue": 4.0 + }, + "sklearn/sklearn.covariance._graph_lasso/GraphicalLassoCV/__init__/cv": { + "target": 
"sklearn/sklearn.covariance._graph_lasso/GraphicalLassoCV/__init__/cv", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.covariance._graph_lasso/GraphicalLassoCV/__init__/tol": { + "target": "sklearn/sklearn.covariance._graph_lasso/GraphicalLassoCV/__init__/tol", + "defaultType": "number", + "defaultValue": 0.0001 + }, + "sklearn/sklearn.covariance._graph_lasso/GraphicalLassoCV/__init__/enet_tol": { + "target": "sklearn/sklearn.covariance._graph_lasso/GraphicalLassoCV/__init__/enet_tol", + "defaultType": "number", + "defaultValue": 0.0001 + }, + "sklearn/sklearn.covariance._graph_lasso/GraphicalLassoCV/__init__/max_iter": { + "target": "sklearn/sklearn.covariance._graph_lasso/GraphicalLassoCV/__init__/max_iter", + "defaultType": "number", + "defaultValue": 100.0 + }, + "sklearn/sklearn.covariance._graph_lasso/GraphicalLassoCV/__init__/mode": { + "target": "sklearn/sklearn.covariance._graph_lasso/GraphicalLassoCV/__init__/mode", + "defaultType": "string", + "defaultValue": "cd" + }, + "sklearn/sklearn.covariance._graph_lasso/GraphicalLassoCV/__init__/n_jobs": { + "target": "sklearn/sklearn.covariance._graph_lasso/GraphicalLassoCV/__init__/n_jobs", + "defaultType": "number", + "defaultValue": -1.0 + }, + "sklearn/sklearn.covariance._graph_lasso/GraphicalLassoCV/__init__/verbose": { + "target": "sklearn/sklearn.covariance._graph_lasso/GraphicalLassoCV/__init__/verbose", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.covariance._graph_lasso/GraphicalLassoCV/__init__/assume_centered": { + "target": "sklearn/sklearn.covariance._graph_lasso/GraphicalLassoCV/__init__/assume_centered", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.covariance._robust_covariance/MinCovDet/__init__/store_precision": { + "target": "sklearn/sklearn.covariance._robust_covariance/MinCovDet/__init__/store_precision", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.covariance._robust_covariance/MinCovDet/__init__/assume_centered": { + "target": "sklearn/sklearn.covariance._robust_covariance/MinCovDet/__init__/assume_centered", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.covariance._robust_covariance/MinCovDet/__init__/random_state": { + "target": "sklearn/sklearn.covariance._robust_covariance/MinCovDet/__init__/random_state", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.covariance._robust_covariance/MinCovDet/fit/y": { + "target": "sklearn/sklearn.covariance._robust_covariance/MinCovDet/fit/y", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.covariance._shrunk_covariance/LedoitWolf/__init__/store_precision": { + "target": "sklearn/sklearn.covariance._shrunk_covariance/LedoitWolf/__init__/store_precision", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.covariance._shrunk_covariance/LedoitWolf/__init__/assume_centered": { + "target": "sklearn/sklearn.covariance._shrunk_covariance/LedoitWolf/__init__/assume_centered", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.covariance._shrunk_covariance/LedoitWolf/__init__/block_size": { + "target": "sklearn/sklearn.covariance._shrunk_covariance/LedoitWolf/__init__/block_size", + "defaultType": "number", + "defaultValue": 1000.0 + }, + "sklearn/sklearn.covariance._shrunk_covariance/LedoitWolf/fit/y": { + "target": "sklearn/sklearn.covariance._shrunk_covariance/LedoitWolf/fit/y", + "defaultType": "none", + "defaultValue": null + }, + 
"sklearn/sklearn.covariance._shrunk_covariance/OAS/fit/y": { + "target": "sklearn/sklearn.covariance._shrunk_covariance/OAS/fit/y", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.covariance._shrunk_covariance/ShrunkCovariance/__init__/store_precision": { + "target": "sklearn/sklearn.covariance._shrunk_covariance/ShrunkCovariance/__init__/store_precision", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.covariance._shrunk_covariance/ShrunkCovariance/__init__/assume_centered": { + "target": "sklearn/sklearn.covariance._shrunk_covariance/ShrunkCovariance/__init__/assume_centered", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.covariance._shrunk_covariance/ShrunkCovariance/fit/y": { + "target": "sklearn/sklearn.covariance._shrunk_covariance/ShrunkCovariance/fit/y", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.covariance._shrunk_covariance/ledoit_wolf/assume_centered": { + "target": "sklearn/sklearn.covariance._shrunk_covariance/ledoit_wolf/assume_centered", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.covariance._shrunk_covariance/ledoit_wolf/block_size": { + "target": "sklearn/sklearn.covariance._shrunk_covariance/ledoit_wolf/block_size", + "defaultType": "number", + "defaultValue": 1000.0 + }, + "sklearn/sklearn.cross_decomposition._pls/CCA/__init__/n_components": { + "target": "sklearn/sklearn.cross_decomposition._pls/CCA/__init__/n_components", + "defaultType": "number", + "defaultValue": 1.0 + }, + "sklearn/sklearn.cross_decomposition._pls/CCA/__init__/scale": { + "target": "sklearn/sklearn.cross_decomposition._pls/CCA/__init__/scale", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.cross_decomposition._pls/CCA/__init__/max_iter": { + "target": "sklearn/sklearn.cross_decomposition._pls/CCA/__init__/max_iter", + "defaultType": "number", + "defaultValue": 500.0 + }, + "sklearn/sklearn.cross_decomposition._pls/CCA/__init__/tol": { + "target": "sklearn/sklearn.cross_decomposition._pls/CCA/__init__/tol", + "defaultType": "number", + "defaultValue": 1e-6 + }, + "sklearn/sklearn.cross_decomposition._pls/CCA/__init__/copy": { + "target": "sklearn/sklearn.cross_decomposition._pls/CCA/__init__/copy", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.cross_decomposition._pls/PLSRegression/__init__/scale": { + "target": "sklearn/sklearn.cross_decomposition._pls/PLSRegression/__init__/scale", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.cross_decomposition._pls/PLSRegression/__init__/copy": { + "target": "sklearn/sklearn.cross_decomposition._pls/PLSRegression/__init__/copy", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.cross_decomposition._pls/PLSSVD/__init__/n_components": { + "target": "sklearn/sklearn.cross_decomposition._pls/PLSSVD/__init__/n_components", + "defaultType": "number", + "defaultValue": 10.0 + }, + "sklearn/sklearn.cross_decomposition._pls/PLSSVD/__init__/scale": { + "target": "sklearn/sklearn.cross_decomposition._pls/PLSSVD/__init__/scale", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.cross_decomposition._pls/PLSSVD/__init__/copy": { + "target": "sklearn/sklearn.cross_decomposition._pls/PLSSVD/__init__/copy", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.cross_decomposition._pls/_PLS/predict/copy": { + "target": "sklearn/sklearn.cross_decomposition._pls/_PLS/predict/copy", + 
"defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.cross_decomposition._pls/_PLS/transform/copy": { + "target": "sklearn/sklearn.cross_decomposition._pls/_PLS/transform/copy", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.datasets._base/load_breast_cancer/as_frame": { + "target": "sklearn/sklearn.datasets._base/load_breast_cancer/as_frame", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.datasets._base/load_diabetes/return_X_y": { + "target": "sklearn/sklearn.datasets._base/load_diabetes/return_X_y", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.datasets._base/load_diabetes/as_frame": { + "target": "sklearn/sklearn.datasets._base/load_diabetes/as_frame", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.datasets._base/load_diabetes/scaled": { + "target": "sklearn/sklearn.datasets._base/load_diabetes/scaled", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.datasets._base/load_digits/n_class": { + "target": "sklearn/sklearn.datasets._base/load_digits/n_class", + "defaultType": "number", + "defaultValue": 10.0 + }, + "sklearn/sklearn.datasets._base/load_digits/as_frame": { + "target": "sklearn/sklearn.datasets._base/load_digits/as_frame", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.datasets._base/load_files/description": { + "target": "sklearn/sklearn.datasets._base/load_files/description", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.datasets._base/load_files/categories": { + "target": "sklearn/sklearn.datasets._base/load_files/categories", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.datasets._base/load_files/load_content": { + "target": "sklearn/sklearn.datasets._base/load_files/load_content", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.datasets._base/load_files/shuffle": { + "target": "sklearn/sklearn.datasets._base/load_files/shuffle", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.datasets._base/load_files/decode_error": { + "target": "sklearn/sklearn.datasets._base/load_files/decode_error", + "defaultType": "string", + "defaultValue": "strict" + }, + "sklearn/sklearn.datasets._base/load_files/random_state": { + "target": "sklearn/sklearn.datasets._base/load_files/random_state", + "defaultType": "number", + "defaultValue": 0.0 + }, + "sklearn/sklearn.datasets._base/load_files/allowed_extensions": { + "target": "sklearn/sklearn.datasets._base/load_files/allowed_extensions", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.datasets._base/load_iris/as_frame": { + "target": "sklearn/sklearn.datasets._base/load_iris/as_frame", + "defaultType": "boolean", "defaultValue": false }, "sklearn/sklearn.datasets._base/load_sample_image/image_name": { @@ -350,11 +945,6 @@ "defaultType": "number", "defaultValue": 1.0 }, - "sklearn/sklearn.datasets._openml/fetch_openml/parser": { - "target": "sklearn/sklearn.datasets._openml/fetch_openml/parser", - "defaultType": "string", - "defaultValue": "warn" - }, "sklearn/sklearn.datasets._samples_generator/make_blobs/shuffle": { "target": "sklearn/sklearn.datasets._samples_generator/make_blobs/shuffle", "defaultType": "boolean", @@ -545,1040 +1135,3855 @@ "defaultType": "boolean", "defaultValue": false }, - "sklearn/sklearn.discriminant_analysis/LinearDiscriminantAnalysis/__init__/priors": { - "target": 
"sklearn/sklearn.discriminant_analysis/LinearDiscriminantAnalysis/__init__/priors", - "defaultType": "none", - "defaultValue": null + "sklearn/sklearn.decomposition._dict_learning/MiniBatchDictionaryLearning/__init__/alpha": { + "target": "sklearn/sklearn.decomposition._dict_learning/MiniBatchDictionaryLearning/__init__/alpha", + "defaultType": "number", + "defaultValue": 0.1 }, - "sklearn/sklearn.discriminant_analysis/LinearDiscriminantAnalysis/__init__/store_covariance": { - "target": "sklearn/sklearn.discriminant_analysis/LinearDiscriminantAnalysis/__init__/store_covariance", - "defaultType": "boolean", - "defaultValue": false + "sklearn/sklearn.decomposition._dict_learning/MiniBatchDictionaryLearning/__init__/n_iter": { + "target": "sklearn/sklearn.decomposition._dict_learning/MiniBatchDictionaryLearning/__init__/n_iter", + "defaultType": "number", + "defaultValue": 50.0 }, - "sklearn/sklearn.discriminant_analysis/LinearDiscriminantAnalysis/__init__/covariance_estimator": { - "target": "sklearn/sklearn.discriminant_analysis/LinearDiscriminantAnalysis/__init__/covariance_estimator", + "sklearn/sklearn.decomposition._dict_learning/MiniBatchDictionaryLearning/__init__/max_iter": { + "target": "sklearn/sklearn.decomposition._dict_learning/MiniBatchDictionaryLearning/__init__/max_iter", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.dummy/DummyClassifier/fit/sample_weight": { - "target": "sklearn/sklearn.dummy/DummyClassifier/fit/sample_weight", - "defaultType": "none", - "defaultValue": null + "sklearn/sklearn.decomposition._dict_learning/MiniBatchDictionaryLearning/__init__/fit_algorithm": { + "target": "sklearn/sklearn.decomposition._dict_learning/MiniBatchDictionaryLearning/__init__/fit_algorithm", + "defaultType": "string", + "defaultValue": "lars" }, - "sklearn/sklearn.dummy/DummyClassifier/score/sample_weight": { - "target": "sklearn/sklearn.dummy/DummyClassifier/score/sample_weight", + "sklearn/sklearn.decomposition._dict_learning/MiniBatchDictionaryLearning/__init__/n_jobs": { + "target": "sklearn/sklearn.decomposition._dict_learning/MiniBatchDictionaryLearning/__init__/n_jobs", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.dummy/DummyRegressor/__init__/quantile": { - "target": "sklearn/sklearn.dummy/DummyRegressor/__init__/quantile", - "defaultType": "none", - "defaultValue": null - }, - "sklearn/sklearn.dummy/DummyRegressor/fit/sample_weight": { - "target": "sklearn/sklearn.dummy/DummyRegressor/fit/sample_weight", - "defaultType": "none", - "defaultValue": null + "sklearn/sklearn.decomposition._dict_learning/MiniBatchDictionaryLearning/__init__/batch_size": { + "target": "sklearn/sklearn.decomposition._dict_learning/MiniBatchDictionaryLearning/__init__/batch_size", + "defaultType": "number", + "defaultValue": 3.0 }, - "sklearn/sklearn.dummy/DummyRegressor/predict/return_std": { - "target": "sklearn/sklearn.dummy/DummyRegressor/predict/return_std", + "sklearn/sklearn.decomposition._dict_learning/MiniBatchDictionaryLearning/__init__/shuffle": { + "target": "sklearn/sklearn.decomposition._dict_learning/MiniBatchDictionaryLearning/__init__/shuffle", "defaultType": "boolean", - "defaultValue": false + "defaultValue": true }, - "sklearn/sklearn.dummy/DummyRegressor/score/sample_weight": { - "target": "sklearn/sklearn.dummy/DummyRegressor/score/sample_weight", + "sklearn/sklearn.decomposition._dict_learning/MiniBatchDictionaryLearning/__init__/dict_init": { + "target": 
"sklearn/sklearn.decomposition._dict_learning/MiniBatchDictionaryLearning/__init__/dict_init", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.feature_extraction.image/PatchExtractor/__init__/random_state": { - "target": "sklearn/sklearn.feature_extraction.image/PatchExtractor/__init__/random_state", - "defaultType": "number", - "defaultValue": 2016.0 - }, - "sklearn/sklearn.feature_extraction.text/HashingVectorizer/__init__/input": { - "target": "sklearn/sklearn.feature_extraction.text/HashingVectorizer/__init__/input", - "defaultType": "string", - "defaultValue": "content" - }, - "sklearn/sklearn.feature_extraction.text/HashingVectorizer/__init__/encoding": { - "target": "sklearn/sklearn.feature_extraction.text/HashingVectorizer/__init__/encoding", + "sklearn/sklearn.decomposition._dict_learning/MiniBatchDictionaryLearning/__init__/transform_algorithm": { + "target": "sklearn/sklearn.decomposition._dict_learning/MiniBatchDictionaryLearning/__init__/transform_algorithm", "defaultType": "string", - "defaultValue": "utf-8" + "defaultValue": "omp" }, - "sklearn/sklearn.feature_extraction.text/HashingVectorizer/fit/y": { - "target": "sklearn/sklearn.feature_extraction.text/HashingVectorizer/fit/y", - "defaultType": "none", - "defaultValue": null - }, - "sklearn/sklearn.feature_extraction.text/HashingVectorizer/fit_transform/y": { - "target": "sklearn/sklearn.feature_extraction.text/HashingVectorizer/fit_transform/y", + "sklearn/sklearn.decomposition._dict_learning/MiniBatchDictionaryLearning/__init__/transform_n_nonzero_coefs": { + "target": "sklearn/sklearn.decomposition._dict_learning/MiniBatchDictionaryLearning/__init__/transform_n_nonzero_coefs", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.feature_extraction.text/TfidfTransformer/fit/y": { - "target": "sklearn/sklearn.feature_extraction.text/TfidfTransformer/fit/y", + "sklearn/sklearn.decomposition._dict_learning/MiniBatchDictionaryLearning/__init__/transform_alpha": { + "target": "sklearn/sklearn.decomposition._dict_learning/MiniBatchDictionaryLearning/__init__/transform_alpha", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.feature_extraction.text/TfidfVectorizer/__init__/input": { - "target": "sklearn/sklearn.feature_extraction.text/TfidfVectorizer/__init__/input", - "defaultType": "string", - "defaultValue": "content" - }, - "sklearn/sklearn.feature_selection._mutual_info/mutual_info_classif/n_neighbors": { - "target": "sklearn/sklearn.feature_selection._mutual_info/mutual_info_classif/n_neighbors", - "defaultType": "number", - "defaultValue": 3.0 - }, - "sklearn/sklearn.feature_selection._mutual_info/mutual_info_classif/copy": { - "target": "sklearn/sklearn.feature_selection._mutual_info/mutual_info_classif/copy", + "sklearn/sklearn.decomposition._dict_learning/MiniBatchDictionaryLearning/__init__/verbose": { + "target": "sklearn/sklearn.decomposition._dict_learning/MiniBatchDictionaryLearning/__init__/verbose", "defaultType": "boolean", - "defaultValue": true + "defaultValue": false }, - "sklearn/sklearn.feature_selection._mutual_info/mutual_info_regression/copy": { - "target": "sklearn/sklearn.feature_selection._mutual_info/mutual_info_regression/copy", + "sklearn/sklearn.decomposition._dict_learning/MiniBatchDictionaryLearning/__init__/split_sign": { + "target": "sklearn/sklearn.decomposition._dict_learning/MiniBatchDictionaryLearning/__init__/split_sign", "defaultType": "boolean", - "defaultValue": true + "defaultValue": false }, - 
"sklearn/sklearn.feature_selection._univariate_selection/f_regression/center": { - "target": "sklearn/sklearn.feature_selection._univariate_selection/f_regression/center", - "defaultType": "boolean", - "defaultValue": true + "sklearn/sklearn.decomposition._dict_learning/MiniBatchDictionaryLearning/__init__/random_state": { + "target": "sklearn/sklearn.decomposition._dict_learning/MiniBatchDictionaryLearning/__init__/random_state", + "defaultType": "number", + "defaultValue": 42.0 }, - "sklearn/sklearn.feature_selection._univariate_selection/f_regression/force_finite": { - "target": "sklearn/sklearn.feature_selection._univariate_selection/f_regression/force_finite", + "sklearn/sklearn.decomposition._dict_learning/MiniBatchDictionaryLearning/__init__/positive_code": { + "target": "sklearn/sklearn.decomposition._dict_learning/MiniBatchDictionaryLearning/__init__/positive_code", "defaultType": "boolean", - "defaultValue": true + "defaultValue": false }, - "sklearn/sklearn.gaussian_process.kernels/ConstantKernel/__init__/constant_value": { - "target": "sklearn/sklearn.gaussian_process.kernels/ConstantKernel/__init__/constant_value", - "defaultType": "number", - "defaultValue": 1.0 + "sklearn/sklearn.decomposition._dict_learning/MiniBatchDictionaryLearning/__init__/positive_dict": { + "target": "sklearn/sklearn.decomposition._dict_learning/MiniBatchDictionaryLearning/__init__/positive_dict", + "defaultType": "boolean", + "defaultValue": false }, - "sklearn/sklearn.gaussian_process.kernels/DotProduct/__init__/sigma_0": { - "target": "sklearn/sklearn.gaussian_process.kernels/DotProduct/__init__/sigma_0", + "sklearn/sklearn.decomposition._dict_learning/MiniBatchDictionaryLearning/__init__/transform_max_iter": { + "target": "sklearn/sklearn.decomposition._dict_learning/MiniBatchDictionaryLearning/__init__/transform_max_iter", "defaultType": "number", - "defaultValue": 1.0 + "defaultValue": 1000.0 }, - "sklearn/sklearn.gaussian_process.kernels/Matern/__init__/length_scale": { - "target": "sklearn/sklearn.gaussian_process.kernels/Matern/__init__/length_scale", - "defaultType": "number", - "defaultValue": 1.0 + "sklearn/sklearn.decomposition._dict_learning/MiniBatchDictionaryLearning/__init__/callback": { + "target": "sklearn/sklearn.decomposition._dict_learning/MiniBatchDictionaryLearning/__init__/callback", + "defaultType": "none", + "defaultValue": null }, - "sklearn/sklearn.gaussian_process.kernels/Matern/__init__/nu": { - "target": "sklearn/sklearn.gaussian_process.kernels/Matern/__init__/nu", + "sklearn/sklearn.decomposition._dict_learning/MiniBatchDictionaryLearning/__init__/tol": { + "target": "sklearn/sklearn.decomposition._dict_learning/MiniBatchDictionaryLearning/__init__/tol", "defaultType": "number", - "defaultValue": 1.5 + "defaultValue": 0.001 }, - "sklearn/sklearn.gaussian_process.kernels/RationalQuadratic/__init__/length_scale": { - "target": "sklearn/sklearn.gaussian_process.kernels/RationalQuadratic/__init__/length_scale", + "sklearn/sklearn.decomposition._dict_learning/MiniBatchDictionaryLearning/__init__/max_no_improvement": { + "target": "sklearn/sklearn.decomposition._dict_learning/MiniBatchDictionaryLearning/__init__/max_no_improvement", "defaultType": "number", - "defaultValue": 1.0 + "defaultValue": 10.0 }, - "sklearn/sklearn.gaussian_process.kernels/RationalQuadratic/__init__/alpha": { - "target": "sklearn/sklearn.gaussian_process.kernels/RationalQuadratic/__init__/alpha", + "sklearn/sklearn.decomposition._factor_analysis/FactorAnalysis/__init__/tol": { + "target": 
"sklearn/sklearn.decomposition._factor_analysis/FactorAnalysis/__init__/tol", "defaultType": "number", - "defaultValue": 1.0 + "defaultValue": 0.01 }, - "sklearn/sklearn.inspection._partial_dependence/partial_dependence/response_method": { - "target": "sklearn/sklearn.inspection._partial_dependence/partial_dependence/response_method", - "defaultType": "string", - "defaultValue": "auto" + "sklearn/sklearn.decomposition._factor_analysis/FactorAnalysis/__init__/copy": { + "target": "sklearn/sklearn.decomposition._factor_analysis/FactorAnalysis/__init__/copy", + "defaultType": "boolean", + "defaultValue": true }, - "sklearn/sklearn.inspection._partial_dependence/partial_dependence/grid_resolution": { - "target": "sklearn/sklearn.inspection._partial_dependence/partial_dependence/grid_resolution", + "sklearn/sklearn.decomposition._factor_analysis/FactorAnalysis/__init__/max_iter": { + "target": "sklearn/sklearn.decomposition._factor_analysis/FactorAnalysis/__init__/max_iter", "defaultType": "number", - "defaultValue": 50.0 - }, - "sklearn/sklearn.inspection._partial_dependence/partial_dependence/method": { - "target": "sklearn/sklearn.inspection._partial_dependence/partial_dependence/method", - "defaultType": "string", - "defaultValue": "auto" - }, - "sklearn/sklearn.inspection._partial_dependence/partial_dependence/kind": { - "target": "sklearn/sklearn.inspection._partial_dependence/partial_dependence/kind", - "defaultType": "string", - "defaultValue": "average" + "defaultValue": 1000.0 }, - "sklearn/sklearn.inspection._permutation_importance/permutation_importance/sample_weight": { - "target": "sklearn/sklearn.inspection._permutation_importance/permutation_importance/sample_weight", + "sklearn/sklearn.decomposition._factor_analysis/FactorAnalysis/__init__/noise_variance_init": { + "target": "sklearn/sklearn.decomposition._factor_analysis/FactorAnalysis/__init__/noise_variance_init", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.inspection._permutation_importance/permutation_importance/max_samples": { - "target": "sklearn/sklearn.inspection._permutation_importance/permutation_importance/max_samples", + "sklearn/sklearn.decomposition._factor_analysis/FactorAnalysis/__init__/svd_method": { + "target": "sklearn/sklearn.decomposition._factor_analysis/FactorAnalysis/__init__/svd_method", + "defaultType": "string", + "defaultValue": "randomized" + }, + "sklearn/sklearn.decomposition._factor_analysis/FactorAnalysis/__init__/iterated_power": { + "target": "sklearn/sklearn.decomposition._factor_analysis/FactorAnalysis/__init__/iterated_power", "defaultType": "number", - "defaultValue": 1.0 + "defaultValue": 3.0 }, - "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/feature_names": { - "target": "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/feature_names", + "sklearn/sklearn.decomposition._factor_analysis/FactorAnalysis/fit/y": { + "target": "sklearn/sklearn.decomposition._factor_analysis/FactorAnalysis/fit/y", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/response_method": { - "target": "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/response_method", + "sklearn/sklearn.decomposition._fastica/FastICA/__init__/algorithm": { + "target": "sklearn/sklearn.decomposition._fastica/FastICA/__init__/algorithm", "defaultType": "string", - "defaultValue": "auto" - }, - 
"sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/n_cols": { - "target": "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/n_cols", - "defaultType": "number", - "defaultValue": 3.0 + "defaultValue": "parallel" }, - "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/method": { - "target": "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/method", + "sklearn/sklearn.decomposition._fastica/FastICA/__init__/fun": { + "target": "sklearn/sklearn.decomposition._fastica/FastICA/__init__/fun", "defaultType": "string", - "defaultValue": "auto" + "defaultValue": "logcosh" }, - "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/n_jobs": { - "target": "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/n_jobs", + "sklearn/sklearn.decomposition._fastica/FastICA/__init__/fun_args": { + "target": "sklearn/sklearn.decomposition._fastica/FastICA/__init__/fun_args", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/verbose": { - "target": "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/verbose", - "defaultType": "number", - "defaultValue": 0.0 - }, - "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/line_kw": { - "target": "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/line_kw", + "sklearn/sklearn.decomposition._fastica/FastICA/__init__/w_init": { + "target": "sklearn/sklearn.decomposition._fastica/FastICA/__init__/w_init", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/ice_lines_kw": { - "target": "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/ice_lines_kw", + "sklearn/sklearn.decomposition._fastica/FastICA/fit/y": { + "target": "sklearn/sklearn.decomposition._fastica/FastICA/fit/y", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/pd_line_kw": { - "target": "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/pd_line_kw", + "sklearn/sklearn.decomposition._fastica/FastICA/fit_transform/y": { + "target": "sklearn/sklearn.decomposition._fastica/FastICA/fit_transform/y", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/contour_kw": { - "target": "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/contour_kw", - "defaultType": "none", - "defaultValue": null + "sklearn/sklearn.decomposition._fastica/FastICA/inverse_transform/copy": { + "target": "sklearn/sklearn.decomposition._fastica/FastICA/inverse_transform/copy", + "defaultType": "boolean", + "defaultValue": true }, - "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/kind": { - "target": "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/kind", - "defaultType": "string", - "defaultValue": "average" + "sklearn/sklearn.decomposition._fastica/FastICA/transform/copy": { + "target": "sklearn/sklearn.decomposition._fastica/FastICA/transform/copy", + "defaultType": "boolean", + "defaultValue": true }, - "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/subsample": { - "target": "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/subsample", - 
"defaultType": "number", - "defaultValue": 1000.0 + "sklearn/sklearn.decomposition._incremental_pca/IncrementalPCA/__init__/whiten": { + "target": "sklearn/sklearn.decomposition._incremental_pca/IncrementalPCA/__init__/whiten", + "defaultType": "boolean", + "defaultValue": false }, - "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/random_state": { - "target": "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/random_state", + "sklearn/sklearn.decomposition._incremental_pca/IncrementalPCA/fit/y": { + "target": "sklearn/sklearn.decomposition._incremental_pca/IncrementalPCA/fit/y", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/centered": { - "target": "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/centered", - "defaultType": "boolean", - "defaultValue": false + "sklearn/sklearn.decomposition._incremental_pca/IncrementalPCA/partial_fit/y": { + "target": "sklearn/sklearn.decomposition._incremental_pca/IncrementalPCA/partial_fit/y", + "defaultType": "none", + "defaultValue": null }, - "sklearn/sklearn.isotonic/IsotonicRegression/__init__/increasing": { - "target": "sklearn/sklearn.isotonic/IsotonicRegression/__init__/increasing", + "sklearn/sklearn.decomposition._incremental_pca/IncrementalPCA/partial_fit/check_input": { + "target": "sklearn/sklearn.decomposition._incremental_pca/IncrementalPCA/partial_fit/check_input", "defaultType": "boolean", "defaultValue": true }, - "sklearn/sklearn.isotonic/IsotonicRegression/fit/sample_weight": { - "target": "sklearn/sklearn.isotonic/IsotonicRegression/fit/sample_weight", + "sklearn/sklearn.decomposition._kernel_pca/KernelPCA/__init__/degree": { + "target": "sklearn/sklearn.decomposition._kernel_pca/KernelPCA/__init__/degree", + "defaultType": "number", + "defaultValue": 3.0 + }, + "sklearn/sklearn.decomposition._kernel_pca/KernelPCA/__init__/coef0": { + "target": "sklearn/sklearn.decomposition._kernel_pca/KernelPCA/__init__/coef0", + "defaultType": "number", + "defaultValue": 1.0 + }, + "sklearn/sklearn.decomposition._kernel_pca/KernelPCA/__init__/kernel_params": { + "target": "sklearn/sklearn.decomposition._kernel_pca/KernelPCA/__init__/kernel_params", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.kernel_approximation/Nystroem/__init__/kernel": { - "target": "sklearn/sklearn.kernel_approximation/Nystroem/__init__/kernel", + "sklearn/sklearn.decomposition._kernel_pca/KernelPCA/__init__/eigen_solver": { + "target": "sklearn/sklearn.decomposition._kernel_pca/KernelPCA/__init__/eigen_solver", "defaultType": "string", - "defaultValue": "rbf" + "defaultValue": "auto" }, - "sklearn/sklearn.kernel_approximation/Nystroem/__init__/gamma": { - "target": "sklearn/sklearn.kernel_approximation/Nystroem/__init__/gamma", + "sklearn/sklearn.decomposition._kernel_pca/KernelPCA/__init__/tol": { + "target": "sklearn/sklearn.decomposition._kernel_pca/KernelPCA/__init__/tol", + "defaultType": "number", + "defaultValue": 0.0 + }, + "sklearn/sklearn.decomposition._kernel_pca/KernelPCA/__init__/max_iter": { + "target": "sklearn/sklearn.decomposition._kernel_pca/KernelPCA/__init__/max_iter", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.kernel_approximation/Nystroem/__init__/coef0": { - "target": "sklearn/sklearn.kernel_approximation/Nystroem/__init__/coef0", + "sklearn/sklearn.decomposition._kernel_pca/KernelPCA/__init__/iterated_power": { + "target": 
"sklearn/sklearn.decomposition._kernel_pca/KernelPCA/__init__/iterated_power", + "defaultType": "string", + "defaultValue": "auto" + }, + "sklearn/sklearn.decomposition._kernel_pca/KernelPCA/__init__/remove_zero_eig": { + "target": "sklearn/sklearn.decomposition._kernel_pca/KernelPCA/__init__/remove_zero_eig", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.decomposition._kernel_pca/KernelPCA/fit/y": { + "target": "sklearn/sklearn.decomposition._kernel_pca/KernelPCA/fit/y", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.kernel_approximation/Nystroem/__init__/degree": { - "target": "sklearn/sklearn.kernel_approximation/Nystroem/__init__/degree", + "sklearn/sklearn.decomposition._lda/LatentDirichletAllocation/__init__/doc_topic_prior": { + "target": "sklearn/sklearn.decomposition._lda/LatentDirichletAllocation/__init__/doc_topic_prior", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.kernel_approximation/Nystroem/__init__/kernel_params": { - "target": "sklearn/sklearn.kernel_approximation/Nystroem/__init__/kernel_params", + "sklearn/sklearn.decomposition._lda/LatentDirichletAllocation/__init__/topic_word_prior": { + "target": "sklearn/sklearn.decomposition._lda/LatentDirichletAllocation/__init__/topic_word_prior", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.kernel_approximation/Nystroem/__init__/n_components": { - "target": "sklearn/sklearn.kernel_approximation/Nystroem/__init__/n_components", + "sklearn/sklearn.decomposition._lda/LatentDirichletAllocation/__init__/learning_decay": { + "target": "sklearn/sklearn.decomposition._lda/LatentDirichletAllocation/__init__/learning_decay", "defaultType": "number", - "defaultValue": 100.0 + "defaultValue": 0.7 }, - "sklearn/sklearn.kernel_approximation/Nystroem/__init__/n_jobs": { - "target": "sklearn/sklearn.kernel_approximation/Nystroem/__init__/n_jobs", - "defaultType": "none", - "defaultValue": null + "sklearn/sklearn.decomposition._lda/LatentDirichletAllocation/__init__/batch_size": { + "target": "sklearn/sklearn.decomposition._lda/LatentDirichletAllocation/__init__/batch_size", + "defaultType": "number", + "defaultValue": 128.0 }, - "sklearn/sklearn.kernel_approximation/RBFSampler/__init__/n_components": { - "target": "sklearn/sklearn.kernel_approximation/RBFSampler/__init__/n_components", + "sklearn/sklearn.decomposition._lda/LatentDirichletAllocation/__init__/evaluate_every": { + "target": "sklearn/sklearn.decomposition._lda/LatentDirichletAllocation/__init__/evaluate_every", "defaultType": "number", - "defaultValue": 100.0 + "defaultValue": -1.0 }, - "sklearn/sklearn.kernel_ridge/KernelRidge/__init__/kernel_params": { - "target": "sklearn/sklearn.kernel_ridge/KernelRidge/__init__/kernel_params", - "defaultType": "none", - "defaultValue": null + "sklearn/sklearn.decomposition._lda/LatentDirichletAllocation/__init__/total_samples": { + "target": "sklearn/sklearn.decomposition._lda/LatentDirichletAllocation/__init__/total_samples", + "defaultType": "number", + "defaultValue": 1000000.0 }, - "sklearn/sklearn.kernel_ridge/KernelRidge/fit/sample_weight": { - "target": "sklearn/sklearn.kernel_ridge/KernelRidge/fit/sample_weight", - "defaultType": "none", - "defaultValue": null + "sklearn/sklearn.decomposition._lda/LatentDirichletAllocation/__init__/perp_tol": { + "target": "sklearn/sklearn.decomposition._lda/LatentDirichletAllocation/__init__/perp_tol", + "defaultType": "number", + "defaultValue": 0.1 }, - "sklearn/sklearn.manifold._mds/smacof/metric": { - 
"target": "sklearn/sklearn.manifold._mds/smacof/metric", - "defaultType": "boolean", - "defaultValue": true + "sklearn/sklearn.decomposition._lda/LatentDirichletAllocation/__init__/mean_change_tol": { + "target": "sklearn/sklearn.decomposition._lda/LatentDirichletAllocation/__init__/mean_change_tol", + "defaultType": "number", + "defaultValue": 0.001 }, - "sklearn/sklearn.manifold._mds/smacof/n_components": { - "target": "sklearn/sklearn.manifold._mds/smacof/n_components", + "sklearn/sklearn.decomposition._lda/LatentDirichletAllocation/__init__/max_doc_update_iter": { + "target": "sklearn/sklearn.decomposition._lda/LatentDirichletAllocation/__init__/max_doc_update_iter", "defaultType": "number", - "defaultValue": 2.0 + "defaultValue": 100.0 }, - "sklearn/sklearn.manifold._mds/smacof/init": { - "target": "sklearn/sklearn.manifold._mds/smacof/init", + "sklearn/sklearn.decomposition._lda/LatentDirichletAllocation/perplexity/sub_sampling": { + "target": "sklearn/sklearn.decomposition._lda/LatentDirichletAllocation/perplexity/sub_sampling", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.decomposition._lda/LatentDirichletAllocation/score/y": { + "target": "sklearn/sklearn.decomposition._lda/LatentDirichletAllocation/score/y", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.manifold._mds/smacof/n_init": { - "target": "sklearn/sklearn.manifold._mds/smacof/n_init", + "sklearn/sklearn.decomposition._nmf/NMF/__init__/tol": { + "target": "sklearn/sklearn.decomposition._nmf/NMF/__init__/tol", "defaultType": "number", - "defaultValue": 8.0 + "defaultValue": 0.0001 }, - "sklearn/sklearn.manifold._mds/smacof/n_jobs": { - "target": "sklearn/sklearn.manifold._mds/smacof/n_jobs", - "defaultType": "none", - "defaultValue": null - }, - "sklearn/sklearn.manifold._mds/smacof/max_iter": { - "target": "sklearn/sklearn.manifold._mds/smacof/max_iter", + "sklearn/sklearn.decomposition._nmf/NMF/__init__/alpha_W": { + "target": "sklearn/sklearn.decomposition._nmf/NMF/__init__/alpha_W", "defaultType": "number", - "defaultValue": 300.0 + "defaultValue": 0.0 }, - "sklearn/sklearn.manifold._mds/smacof/verbose": { - "target": "sklearn/sklearn.manifold._mds/smacof/verbose", + "sklearn/sklearn.decomposition._nmf/NMF/__init__/alpha_H": { + "target": "sklearn/sklearn.decomposition._nmf/NMF/__init__/alpha_H", + "defaultType": "string", + "defaultValue": "same" + }, + "sklearn/sklearn.decomposition._nmf/NMF/__init__/verbose": { + "target": "sklearn/sklearn.decomposition._nmf/NMF/__init__/verbose", "defaultType": "number", "defaultValue": 0.0 }, - "sklearn/sklearn.manifold._mds/smacof/eps": { - "target": "sklearn/sklearn.manifold._mds/smacof/eps", - "defaultType": "number", - "defaultValue": 0.001 + "sklearn/sklearn.decomposition._nmf/NMF/__init__/regularization": { + "target": "sklearn/sklearn.decomposition._nmf/NMF/__init__/regularization", + "defaultType": "string", + "defaultValue": "deprecated" }, - "sklearn/sklearn.manifold._mds/smacof/random_state": { - "target": "sklearn/sklearn.manifold._mds/smacof/random_state", + "sklearn/sklearn.decomposition._nmf/NMF/fit/y": { + "target": "sklearn/sklearn.decomposition._nmf/NMF/fit/y", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.manifold._mds/smacof/return_n_iter": { - "target": "sklearn/sklearn.manifold._mds/smacof/return_n_iter", - "defaultType": "boolean", - "defaultValue": false + "sklearn/sklearn.decomposition._nmf/NMF/fit_transform/y": { + "target": "sklearn/sklearn.decomposition._nmf/NMF/fit_transform/y", + 
"defaultType": "none", + "defaultValue": null }, - "sklearn/sklearn.metrics._classification/balanced_accuracy_score/adjusted": { - "target": "sklearn/sklearn.metrics._classification/balanced_accuracy_score/adjusted", - "defaultType": "boolean", - "defaultValue": false + "sklearn/sklearn.decomposition._nmf/NMF/fit_transform/W": { + "target": "sklearn/sklearn.decomposition._nmf/NMF/fit_transform/W", + "defaultType": "none", + "defaultValue": null }, - "sklearn/sklearn.metrics._classification/brier_score_loss/sample_weight": { - "target": "sklearn/sklearn.metrics._classification/brier_score_loss/sample_weight", + "sklearn/sklearn.decomposition._nmf/NMF/fit_transform/H": { + "target": "sklearn/sklearn.decomposition._nmf/NMF/fit_transform/H", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.metrics._classification/classification_report/sample_weight": { - "target": "sklearn/sklearn.metrics._classification/classification_report/sample_weight", + "sklearn/sklearn.decomposition._pca/PCA/__init__/tol": { + "target": "sklearn/sklearn.decomposition._pca/PCA/__init__/tol", + "defaultType": "number", + "defaultValue": 0.0 + }, + "sklearn/sklearn.decomposition._pca/PCA/__init__/iterated_power": { + "target": "sklearn/sklearn.decomposition._pca/PCA/__init__/iterated_power", + "defaultType": "string", + "defaultValue": "auto" + }, + "sklearn/sklearn.decomposition._pca/PCA/__init__/n_oversamples": { + "target": "sklearn/sklearn.decomposition._pca/PCA/__init__/n_oversamples", + "defaultType": "number", + "defaultValue": 10.0 + }, + "sklearn/sklearn.decomposition._pca/PCA/__init__/power_iteration_normalizer": { + "target": "sklearn/sklearn.decomposition._pca/PCA/__init__/power_iteration_normalizer", + "defaultType": "string", + "defaultValue": "auto" + }, + "sklearn/sklearn.decomposition._sparse_pca/SparsePCA/__init__/alpha": { + "target": "sklearn/sklearn.decomposition._sparse_pca/SparsePCA/__init__/alpha", + "defaultType": "number", + "defaultValue": 1.0 + }, + "sklearn/sklearn.decomposition._sparse_pca/SparsePCA/__init__/ridge_alpha": { + "target": "sklearn/sklearn.decomposition._sparse_pca/SparsePCA/__init__/ridge_alpha", + "defaultType": "number", + "defaultValue": 0.01 + }, + "sklearn/sklearn.decomposition._sparse_pca/SparsePCA/__init__/max_iter": { + "target": "sklearn/sklearn.decomposition._sparse_pca/SparsePCA/__init__/max_iter", + "defaultType": "number", + "defaultValue": 1000.0 + }, + "sklearn/sklearn.decomposition._sparse_pca/SparsePCA/__init__/tol": { + "target": "sklearn/sklearn.decomposition._sparse_pca/SparsePCA/__init__/tol", + "defaultType": "number", + "defaultValue": 1e-8 + }, + "sklearn/sklearn.decomposition._sparse_pca/SparsePCA/__init__/method": { + "target": "sklearn/sklearn.decomposition._sparse_pca/SparsePCA/__init__/method", + "defaultType": "string", + "defaultValue": "lars" + }, + "sklearn/sklearn.decomposition._sparse_pca/SparsePCA/__init__/n_jobs": { + "target": "sklearn/sklearn.decomposition._sparse_pca/SparsePCA/__init__/n_jobs", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.metrics._classification/f1_score/sample_weight": { - "target": "sklearn/sklearn.metrics._classification/f1_score/sample_weight", + "sklearn/sklearn.decomposition._sparse_pca/SparsePCA/__init__/U_init": { + "target": "sklearn/sklearn.decomposition._sparse_pca/SparsePCA/__init__/U_init", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.metrics._classification/fbeta_score/labels": { - "target": 
"sklearn/sklearn.metrics._classification/fbeta_score/labels", + "sklearn/sklearn.decomposition._sparse_pca/SparsePCA/__init__/V_init": { + "target": "sklearn/sklearn.decomposition._sparse_pca/SparsePCA/__init__/V_init", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.metrics._classification/fbeta_score/pos_label": { - "target": "sklearn/sklearn.metrics._classification/fbeta_score/pos_label", + "sklearn/sklearn.decomposition._sparse_pca/SparsePCA/__init__/verbose": { + "target": "sklearn/sklearn.decomposition._sparse_pca/SparsePCA/__init__/verbose", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.decomposition._truncated_svd/TruncatedSVD/__init__/n_oversamples": { + "target": "sklearn/sklearn.decomposition._truncated_svd/TruncatedSVD/__init__/n_oversamples", "defaultType": "number", - "defaultValue": 1.0 + "defaultValue": 10.0 }, - "sklearn/sklearn.metrics._classification/fbeta_score/sample_weight": { - "target": "sklearn/sklearn.metrics._classification/fbeta_score/sample_weight", + "sklearn/sklearn.decomposition._truncated_svd/TruncatedSVD/__init__/power_iteration_normalizer": { + "target": "sklearn/sklearn.decomposition._truncated_svd/TruncatedSVD/__init__/power_iteration_normalizer", + "defaultType": "string", + "defaultValue": "auto" + }, + "sklearn/sklearn.discriminant_analysis/LinearDiscriminantAnalysis/__init__/priors": { + "target": "sklearn/sklearn.discriminant_analysis/LinearDiscriminantAnalysis/__init__/priors", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.metrics._classification/fbeta_score/zero_division": { - "target": "sklearn/sklearn.metrics._classification/fbeta_score/zero_division", - "defaultType": "string", - "defaultValue": "warn" + "sklearn/sklearn.discriminant_analysis/LinearDiscriminantAnalysis/__init__/store_covariance": { + "target": "sklearn/sklearn.discriminant_analysis/LinearDiscriminantAnalysis/__init__/store_covariance", + "defaultType": "boolean", + "defaultValue": false }, - "sklearn/sklearn.metrics._classification/hamming_loss/sample_weight": { - "target": "sklearn/sklearn.metrics._classification/hamming_loss/sample_weight", + "sklearn/sklearn.discriminant_analysis/LinearDiscriminantAnalysis/__init__/covariance_estimator": { + "target": "sklearn/sklearn.discriminant_analysis/LinearDiscriminantAnalysis/__init__/covariance_estimator", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.metrics._classification/hinge_loss/labels": { - "target": "sklearn/sklearn.metrics._classification/hinge_loss/labels", + "sklearn/sklearn.dummy/DummyClassifier/fit/sample_weight": { + "target": "sklearn/sklearn.dummy/DummyClassifier/fit/sample_weight", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.metrics._classification/hinge_loss/sample_weight": { - "target": "sklearn/sklearn.metrics._classification/hinge_loss/sample_weight", + "sklearn/sklearn.dummy/DummyClassifier/score/sample_weight": { + "target": "sklearn/sklearn.dummy/DummyClassifier/score/sample_weight", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.metrics._classification/jaccard_score/labels": { - "target": "sklearn/sklearn.metrics._classification/jaccard_score/labels", + "sklearn/sklearn.dummy/DummyRegressor/__init__/quantile": { + "target": "sklearn/sklearn.dummy/DummyRegressor/__init__/quantile", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.metrics._classification/jaccard_score/sample_weight": { - "target": 
"sklearn/sklearn.metrics._classification/jaccard_score/sample_weight", + "sklearn/sklearn.dummy/DummyRegressor/fit/sample_weight": { + "target": "sklearn/sklearn.dummy/DummyRegressor/fit/sample_weight", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.metrics._classification/jaccard_score/zero_division": { - "target": "sklearn/sklearn.metrics._classification/jaccard_score/zero_division", - "defaultType": "string", - "defaultValue": "warn" + "sklearn/sklearn.dummy/DummyRegressor/predict/return_std": { + "target": "sklearn/sklearn.dummy/DummyRegressor/predict/return_std", + "defaultType": "boolean", + "defaultValue": false }, - "sklearn/sklearn.metrics._classification/matthews_corrcoef/sample_weight": { - "target": "sklearn/sklearn.metrics._classification/matthews_corrcoef/sample_weight", + "sklearn/sklearn.dummy/DummyRegressor/score/sample_weight": { + "target": "sklearn/sklearn.dummy/DummyRegressor/score/sample_weight", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.metrics._classification/multilabel_confusion_matrix/sample_weight": { - "target": "sklearn/sklearn.metrics._classification/multilabel_confusion_matrix/sample_weight", - "defaultType": "none", - "defaultValue": null + "sklearn/sklearn.ensemble._bagging/BaggingClassifier/__init__/bootstrap": { + "target": "sklearn/sklearn.ensemble._bagging/BaggingClassifier/__init__/bootstrap", + "defaultType": "boolean", + "defaultValue": true }, - "sklearn/sklearn.metrics._classification/multilabel_confusion_matrix/samplewise": { - "target": "sklearn/sklearn.metrics._classification/multilabel_confusion_matrix/samplewise", + "sklearn/sklearn.ensemble._bagging/BaggingClassifier/__init__/warm_start": { + "target": "sklearn/sklearn.ensemble._bagging/BaggingClassifier/__init__/warm_start", "defaultType": "boolean", "defaultValue": false }, - "sklearn/sklearn.metrics._classification/precision_recall_fscore_support/pos_label": { - "target": "sklearn/sklearn.metrics._classification/precision_recall_fscore_support/pos_label", + "sklearn/sklearn.ensemble._bagging/BaggingRegressor/__init__/bootstrap": { + "target": "sklearn/sklearn.ensemble._bagging/BaggingRegressor/__init__/bootstrap", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.ensemble._bagging/BaggingRegressor/__init__/bootstrap_features": { + "target": "sklearn/sklearn.ensemble._bagging/BaggingRegressor/__init__/bootstrap_features", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.ensemble._forest/ExtraTreesClassifier/__init__/ccp_alpha": { + "target": "sklearn/sklearn.ensemble._forest/ExtraTreesClassifier/__init__/ccp_alpha", + "defaultType": "number", + "defaultValue": 0.0 + }, + "sklearn/sklearn.ensemble._forest/ExtraTreesRegressor/__init__/min_weight_fraction_leaf": { + "target": "sklearn/sklearn.ensemble._forest/ExtraTreesRegressor/__init__/min_weight_fraction_leaf", + "defaultType": "number", + "defaultValue": 0.0 + }, + "sklearn/sklearn.ensemble._forest/ExtraTreesRegressor/__init__/min_impurity_decrease": { + "target": "sklearn/sklearn.ensemble._forest/ExtraTreesRegressor/__init__/min_impurity_decrease", + "defaultType": "number", + "defaultValue": 0.0 + }, + "sklearn/sklearn.ensemble._forest/ExtraTreesRegressor/__init__/ccp_alpha": { + "target": "sklearn/sklearn.ensemble._forest/ExtraTreesRegressor/__init__/ccp_alpha", + "defaultType": "number", + "defaultValue": 0.0 + }, + "sklearn/sklearn.ensemble._forest/RandomTreesEmbedding/__init__/min_samples_leaf": { + "target": 
"sklearn/sklearn.ensemble._forest/RandomTreesEmbedding/__init__/min_samples_leaf", "defaultType": "number", "defaultValue": 1.0 }, - "sklearn/sklearn.metrics._classification/precision_recall_fscore_support/sample_weight": { - "target": "sklearn/sklearn.metrics._classification/precision_recall_fscore_support/sample_weight", + "sklearn/sklearn.ensemble._forest/RandomTreesEmbedding/__init__/min_weight_fraction_leaf": { + "target": "sklearn/sklearn.ensemble._forest/RandomTreesEmbedding/__init__/min_weight_fraction_leaf", + "defaultType": "number", + "defaultValue": 0.0 + }, + "sklearn/sklearn.ensemble._forest/RandomTreesEmbedding/__init__/max_leaf_nodes": { + "target": "sklearn/sklearn.ensemble._forest/RandomTreesEmbedding/__init__/max_leaf_nodes", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.metrics._classification/precision_recall_fscore_support/zero_division": { - "target": "sklearn/sklearn.metrics._classification/precision_recall_fscore_support/zero_division", - "defaultType": "string", - "defaultValue": "warn" + "sklearn/sklearn.ensemble._forest/RandomTreesEmbedding/__init__/min_impurity_decrease": { + "target": "sklearn/sklearn.ensemble._forest/RandomTreesEmbedding/__init__/min_impurity_decrease", + "defaultType": "number", + "defaultValue": 0.0 }, - "sklearn/sklearn.metrics._classification/precision_score/sample_weight": { - "target": "sklearn/sklearn.metrics._classification/precision_score/sample_weight", + "sklearn/sklearn.ensemble._forest/RandomTreesEmbedding/__init__/warm_start": { + "target": "sklearn/sklearn.ensemble._forest/RandomTreesEmbedding/__init__/warm_start", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.ensemble._forest/RandomTreesEmbedding/fit/y": { + "target": "sklearn/sklearn.ensemble._forest/RandomTreesEmbedding/fit/y", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.metrics._classification/recall_score/sample_weight": { - "target": "sklearn/sklearn.metrics._classification/recall_score/sample_weight", + "sklearn/sklearn.ensemble._forest/RandomTreesEmbedding/fit/sample_weight": { + "target": "sklearn/sklearn.ensemble._forest/RandomTreesEmbedding/fit/sample_weight", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.metrics._classification/zero_one_loss/normalize": { - "target": "sklearn/sklearn.metrics._classification/zero_one_loss/normalize", - "defaultType": "boolean", - "defaultValue": true - }, - "sklearn/sklearn.metrics._classification/zero_one_loss/sample_weight": { - "target": "sklearn/sklearn.metrics._classification/zero_one_loss/sample_weight", + "sklearn/sklearn.ensemble._gb/BaseGradientBoosting/fit/monitor": { + "target": "sklearn/sklearn.ensemble._gb/BaseGradientBoosting/fit/monitor", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.metrics._plot.confusion_matrix/plot_confusion_matrix/sample_weight": { - "target": "sklearn/sklearn.metrics._plot.confusion_matrix/plot_confusion_matrix/sample_weight", + "sklearn/sklearn.ensemble._gb/GradientBoostingClassifier/__init__/init": { + "target": "sklearn/sklearn.ensemble._gb/GradientBoostingClassifier/__init__/init", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.metrics._plot.confusion_matrix/plot_confusion_matrix/include_values": { - "target": "sklearn/sklearn.metrics._plot.confusion_matrix/plot_confusion_matrix/include_values", - "defaultType": "boolean", - "defaultValue": true + "sklearn/sklearn.ensemble._gb/GradientBoostingClassifier/__init__/validation_fraction": { + "target": 
"sklearn/sklearn.ensemble._gb/GradientBoostingClassifier/__init__/validation_fraction", + "defaultType": "number", + "defaultValue": 0.1 }, - "sklearn/sklearn.metrics._plot.confusion_matrix/plot_confusion_matrix/colorbar": { - "target": "sklearn/sklearn.metrics._plot.confusion_matrix/plot_confusion_matrix/colorbar", - "defaultType": "boolean", - "defaultValue": true + "sklearn/sklearn.ensemble._gb/GradientBoostingClassifier/__init__/tol": { + "target": "sklearn/sklearn.ensemble._gb/GradientBoostingClassifier/__init__/tol", + "defaultType": "number", + "defaultValue": 0.0001 }, - "sklearn/sklearn.metrics._plot.precision_recall_curve/plot_precision_recall_curve/sample_weight": { - "target": "sklearn/sklearn.metrics._plot.precision_recall_curve/plot_precision_recall_curve/sample_weight", + "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/BaseHistGradientBoosting/fit/sample_weight": { + "target": "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/BaseHistGradientBoosting/fit/sample_weight", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.metrics._plot.precision_recall_curve/plot_precision_recall_curve/response_method": { - "target": "sklearn/sklearn.metrics._plot.precision_recall_curve/plot_precision_recall_curve/response_method", - "defaultType": "string", - "defaultValue": "auto" - }, - "sklearn/sklearn.metrics._plot.precision_recall_curve/plot_precision_recall_curve/name": { - "target": "sklearn/sklearn.metrics._plot.precision_recall_curve/plot_precision_recall_curve/name", + "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/HistGradientBoostingClassifier/__init__/categorical_features": { + "target": "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/HistGradientBoostingClassifier/__init__/categorical_features", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.metrics._plot.precision_recall_curve/plot_precision_recall_curve/pos_label": { - "target": "sklearn/sklearn.metrics._plot.precision_recall_curve/plot_precision_recall_curve/pos_label", + "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/HistGradientBoostingClassifier/__init__/monotonic_cst": { + "target": "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/HistGradientBoostingClassifier/__init__/monotonic_cst", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.metrics._plot.roc_curve/plot_roc_curve/sample_weight": { - "target": "sklearn/sklearn.metrics._plot.roc_curve/plot_roc_curve/sample_weight", - "defaultType": "none", - "defaultValue": null + "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/HistGradientBoostingClassifier/__init__/warm_start": { + "target": "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/HistGradientBoostingClassifier/__init__/warm_start", + "defaultType": "boolean", + "defaultValue": false }, - "sklearn/sklearn.metrics._plot.roc_curve/plot_roc_curve/response_method": { - "target": "sklearn/sklearn.metrics._plot.roc_curve/plot_roc_curve/response_method", + "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/HistGradientBoostingClassifier/__init__/early_stopping": { + "target": "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/HistGradientBoostingClassifier/__init__/early_stopping", "defaultType": "string", "defaultValue": "auto" }, - "sklearn/sklearn.metrics._plot.roc_curve/plot_roc_curve/pos_label": { - "target": "sklearn/sklearn.metrics._plot.roc_curve/plot_roc_curve/pos_label", + 
"sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/HistGradientBoostingClassifier/__init__/tol": { + "target": "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/HistGradientBoostingClassifier/__init__/tol", + "defaultType": "number", + "defaultValue": 1e-7 + }, + "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/HistGradientBoostingRegressor/__init__/quantile": { + "target": "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/HistGradientBoostingRegressor/__init__/quantile", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.metrics._ranking/average_precision_score/pos_label": { - "target": "sklearn/sklearn.metrics._ranking/average_precision_score/pos_label", + "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/HistGradientBoostingRegressor/__init__/max_bins": { + "target": "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/HistGradientBoostingRegressor/__init__/max_bins", "defaultType": "number", - "defaultValue": 1.0 + "defaultValue": 255.0 }, - "sklearn/sklearn.metrics._ranking/average_precision_score/sample_weight": { - "target": "sklearn/sklearn.metrics._ranking/average_precision_score/sample_weight", + "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/HistGradientBoostingRegressor/__init__/categorical_features": { + "target": "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/HistGradientBoostingRegressor/__init__/categorical_features", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.metrics._ranking/ndcg_score/k": { - "target": "sklearn/sklearn.metrics._ranking/ndcg_score/k", - "defaultType": "number", - "defaultValue": 5.0 - }, - "sklearn/sklearn.metrics._ranking/ndcg_score/sample_weight": { - "target": "sklearn/sklearn.metrics._ranking/ndcg_score/sample_weight", + "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/HistGradientBoostingRegressor/__init__/monotonic_cst": { + "target": "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/HistGradientBoostingRegressor/__init__/monotonic_cst", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.metrics._ranking/ndcg_score/ignore_ties": { - "target": "sklearn/sklearn.metrics._ranking/ndcg_score/ignore_ties", + "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/HistGradientBoostingRegressor/__init__/warm_start": { + "target": "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/HistGradientBoostingRegressor/__init__/warm_start", "defaultType": "boolean", "defaultValue": false }, - "sklearn/sklearn.metrics._ranking/precision_recall_curve/sample_weight": { - "target": "sklearn/sklearn.metrics._ranking/precision_recall_curve/sample_weight", + "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/HistGradientBoostingRegressor/__init__/scoring": { + "target": "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/HistGradientBoostingRegressor/__init__/scoring", + "defaultType": "string", + "defaultValue": "loss" + }, + "sklearn/sklearn.ensemble._iforest/IsolationForest/__init__/warm_start": { + "target": "sklearn/sklearn.ensemble._iforest/IsolationForest/__init__/warm_start", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.ensemble._iforest/IsolationForest/fit/sample_weight": { + "target": "sklearn/sklearn.ensemble._iforest/IsolationForest/fit/sample_weight", "defaultType": "none", "defaultValue": null }, - 
"sklearn/sklearn.metrics._ranking/roc_auc_score/max_fpr": { - "target": "sklearn/sklearn.metrics._ranking/roc_auc_score/max_fpr", + "sklearn/sklearn.ensemble._stacking/StackingClassifier/fit/sample_weight": { + "target": "sklearn/sklearn.ensemble._stacking/StackingClassifier/fit/sample_weight", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.metrics._regression/explained_variance_score/sample_weight": { - "target": "sklearn/sklearn.metrics._regression/explained_variance_score/sample_weight", + "sklearn/sklearn.ensemble._stacking/StackingRegressor/__init__/passthrough": { + "target": "sklearn/sklearn.ensemble._stacking/StackingRegressor/__init__/passthrough", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.ensemble._stacking/StackingRegressor/fit/sample_weight": { + "target": "sklearn/sklearn.ensemble._stacking/StackingRegressor/fit/sample_weight", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.metrics._regression/explained_variance_score/multioutput": { - "target": "sklearn/sklearn.metrics._regression/explained_variance_score/multioutput", - "defaultType": "string", - "defaultValue": "uniform_average" - }, - "sklearn/sklearn.metrics._regression/explained_variance_score/force_finite": { - "target": "sklearn/sklearn.metrics._regression/explained_variance_score/force_finite", + "sklearn/sklearn.ensemble._voting/VotingClassifier/__init__/flatten_transform": { + "target": "sklearn/sklearn.ensemble._voting/VotingClassifier/__init__/flatten_transform", "defaultType": "boolean", "defaultValue": true }, - "sklearn/sklearn.metrics._regression/mean_absolute_percentage_error/sample_weight": { - "target": "sklearn/sklearn.metrics._regression/mean_absolute_percentage_error/sample_weight", + "sklearn/sklearn.ensemble._voting/VotingClassifier/fit/sample_weight": { + "target": "sklearn/sklearn.ensemble._voting/VotingClassifier/fit/sample_weight", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.metrics._regression/mean_absolute_percentage_error/multioutput": { - "target": "sklearn/sklearn.metrics._regression/mean_absolute_percentage_error/multioutput", + "sklearn/sklearn.ensemble._voting/VotingRegressor/fit/sample_weight": { + "target": "sklearn/sklearn.ensemble._voting/VotingRegressor/fit/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.ensemble._voting/_BaseVoting/fit/sample_weight": { + "target": "sklearn/sklearn.ensemble._voting/_BaseVoting/fit/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.ensemble._weight_boosting/AdaBoostRegressor/fit/sample_weight": { + "target": "sklearn/sklearn.ensemble._weight_boosting/AdaBoostRegressor/fit/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.feature_extraction._dict_vectorizer/DictVectorizer/__init__/separator": { + "target": "sklearn/sklearn.feature_extraction._dict_vectorizer/DictVectorizer/__init__/separator", "defaultType": "string", - "defaultValue": "uniform_average" + "defaultValue": "=" }, - "sklearn/sklearn.metrics._regression/mean_squared_log_error/sample_weight": { - "target": "sklearn/sklearn.metrics._regression/mean_squared_log_error/sample_weight", + "sklearn/sklearn.feature_extraction._dict_vectorizer/DictVectorizer/fit/y": { + "target": "sklearn/sklearn.feature_extraction._dict_vectorizer/DictVectorizer/fit/y", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.metrics._regression/mean_squared_log_error/multioutput": { - "target": 
"sklearn/sklearn.metrics._regression/mean_squared_log_error/multioutput", + "sklearn/sklearn.feature_extraction._dict_vectorizer/DictVectorizer/fit_transform/y": { + "target": "sklearn/sklearn.feature_extraction._dict_vectorizer/DictVectorizer/fit_transform/y", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.feature_extraction._hash/FeatureHasher/__init__/input_type": { + "target": "sklearn/sklearn.feature_extraction._hash/FeatureHasher/__init__/input_type", "defaultType": "string", - "defaultValue": "uniform_average" + "defaultValue": "string" }, - "sklearn/sklearn.metrics._regression/mean_squared_log_error/squared": { - "target": "sklearn/sklearn.metrics._regression/mean_squared_log_error/squared", + "sklearn/sklearn.feature_extraction._hash/FeatureHasher/__init__/alternate_sign": { + "target": "sklearn/sklearn.feature_extraction._hash/FeatureHasher/__init__/alternate_sign", "defaultType": "boolean", "defaultValue": true }, - "sklearn/sklearn.metrics._regression/mean_tweedie_deviance/sample_weight": { - "target": "sklearn/sklearn.metrics._regression/mean_tweedie_deviance/sample_weight", + "sklearn/sklearn.feature_extraction.image/PatchExtractor/__init__/random_state": { + "target": "sklearn/sklearn.feature_extraction.image/PatchExtractor/__init__/random_state", + "defaultType": "number", + "defaultValue": 2016.0 + }, + "sklearn/sklearn.feature_extraction.text/HashingVectorizer/__init__/input": { + "target": "sklearn/sklearn.feature_extraction.text/HashingVectorizer/__init__/input", + "defaultType": "string", + "defaultValue": "content" + }, + "sklearn/sklearn.feature_extraction.text/HashingVectorizer/__init__/encoding": { + "target": "sklearn/sklearn.feature_extraction.text/HashingVectorizer/__init__/encoding", + "defaultType": "string", + "defaultValue": "utf-8" + }, + "sklearn/sklearn.feature_extraction.text/HashingVectorizer/fit/y": { + "target": "sklearn/sklearn.feature_extraction.text/HashingVectorizer/fit/y", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.metrics._regression/median_absolute_error/multioutput": { - "target": "sklearn/sklearn.metrics._regression/median_absolute_error/multioutput", - "defaultType": "string", - "defaultValue": "uniform_average" + "sklearn/sklearn.feature_extraction.text/HashingVectorizer/fit_transform/y": { + "target": "sklearn/sklearn.feature_extraction.text/HashingVectorizer/fit_transform/y", + "defaultType": "none", + "defaultValue": null }, - "sklearn/sklearn.metrics._regression/median_absolute_error/sample_weight": { - "target": "sklearn/sklearn.metrics._regression/median_absolute_error/sample_weight", + "sklearn/sklearn.feature_extraction.text/TfidfTransformer/fit/y": { + "target": "sklearn/sklearn.feature_extraction.text/TfidfTransformer/fit/y", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.metrics._regression/r2_score/force_finite": { - "target": "sklearn/sklearn.metrics._regression/r2_score/force_finite", + "sklearn/sklearn.feature_extraction.text/TfidfVectorizer/__init__/input": { + "target": "sklearn/sklearn.feature_extraction.text/TfidfVectorizer/__init__/input", + "defaultType": "string", + "defaultValue": "content" + }, + "sklearn/sklearn.feature_selection._from_model/SelectFromModel/__init__/norm_order": { + "target": "sklearn/sklearn.feature_selection._from_model/SelectFromModel/__init__/norm_order", + "defaultType": "number", + "defaultValue": 1.0 + }, + "sklearn/sklearn.feature_selection._from_model/SelectFromModel/__init__/importance_getter": { + "target": 
"sklearn/sklearn.feature_selection._from_model/SelectFromModel/__init__/importance_getter", + "defaultType": "string", + "defaultValue": "auto" + }, + "sklearn/sklearn.feature_selection._mutual_info/mutual_info_classif/n_neighbors": { + "target": "sklearn/sklearn.feature_selection._mutual_info/mutual_info_classif/n_neighbors", + "defaultType": "number", + "defaultValue": 3.0 + }, + "sklearn/sklearn.feature_selection._mutual_info/mutual_info_classif/copy": { + "target": "sklearn/sklearn.feature_selection._mutual_info/mutual_info_classif/copy", "defaultType": "boolean", "defaultValue": true }, - "sklearn/sklearn.metrics.cluster._supervised/adjusted_mutual_info_score/average_method": { - "target": "sklearn/sklearn.metrics.cluster._supervised/adjusted_mutual_info_score/average_method", + "sklearn/sklearn.feature_selection._mutual_info/mutual_info_regression/copy": { + "target": "sklearn/sklearn.feature_selection._mutual_info/mutual_info_regression/copy", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.feature_selection._rfe/RFE/__init__/importance_getter": { + "target": "sklearn/sklearn.feature_selection._rfe/RFE/__init__/importance_getter", "defaultType": "string", - "defaultValue": "arithmetic" + "defaultValue": "auto" }, - "sklearn/sklearn.metrics.cluster._supervised/normalized_mutual_info_score/average_method": { - "target": "sklearn/sklearn.metrics.cluster._supervised/normalized_mutual_info_score/average_method", + "sklearn/sklearn.feature_selection._rfe/RFECV/__init__/importance_getter": { + "target": "sklearn/sklearn.feature_selection._rfe/RFECV/__init__/importance_getter", "defaultType": "string", - "defaultValue": "arithmetic" + "defaultValue": "auto" }, - "sklearn/sklearn.metrics.cluster._supervised/v_measure_score/beta": { - "target": "sklearn/sklearn.metrics.cluster._supervised/v_measure_score/beta", + "sklearn/sklearn.feature_selection._rfe/RFECV/fit/groups": { + "target": "sklearn/sklearn.feature_selection._rfe/RFECV/fit/groups", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.feature_selection._sequential/SequentialFeatureSelector/__init__/n_features_to_select": { + "target": "sklearn/sklearn.feature_selection._sequential/SequentialFeatureSelector/__init__/n_features_to_select", "defaultType": "number", - "defaultValue": 1.0 + "defaultValue": 3.0 }, - "sklearn/sklearn.metrics.cluster._unsupervised/silhouette_samples/metric": { - "target": "sklearn/sklearn.metrics.cluster._unsupervised/silhouette_samples/metric", + "sklearn/sklearn.feature_selection._sequential/SequentialFeatureSelector/__init__/tol": { + "target": "sklearn/sklearn.feature_selection._sequential/SequentialFeatureSelector/__init__/tol", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.feature_selection._sequential/SequentialFeatureSelector/__init__/direction": { + "target": "sklearn/sklearn.feature_selection._sequential/SequentialFeatureSelector/__init__/direction", "defaultType": "string", - "defaultValue": "euclidean" + "defaultValue": "backward" }, - "sklearn/sklearn.metrics.pairwise/cosine_distances/Y": { - "target": "sklearn/sklearn.metrics.pairwise/cosine_distances/Y", + "sklearn/sklearn.feature_selection._sequential/SequentialFeatureSelector/__init__/scoring": { + "target": "sklearn/sklearn.feature_selection._sequential/SequentialFeatureSelector/__init__/scoring", + "defaultType": "string", + "defaultValue": "roc_auc" + }, + "sklearn/sklearn.feature_selection._sequential/SequentialFeatureSelector/__init__/cv": { + "target": 
"sklearn/sklearn.feature_selection._sequential/SequentialFeatureSelector/__init__/cv", + "defaultType": "number", + "defaultValue": 5.0 + }, + "sklearn/sklearn.feature_selection._sequential/SequentialFeatureSelector/__init__/n_jobs": { + "target": "sklearn/sklearn.feature_selection._sequential/SequentialFeatureSelector/__init__/n_jobs", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.metrics.pairwise/cosine_similarity/dense_output": { - "target": "sklearn/sklearn.metrics.pairwise/cosine_similarity/dense_output", + "sklearn/sklearn.feature_selection._univariate_selection/SelectFdr/__init__/alpha": { + "target": "sklearn/sklearn.feature_selection._univariate_selection/SelectFdr/__init__/alpha", + "defaultType": "number", + "defaultValue": 0.03 + }, + "sklearn/sklearn.feature_selection._univariate_selection/SelectFwe/__init__/alpha": { + "target": "sklearn/sklearn.feature_selection._univariate_selection/SelectFwe/__init__/alpha", + "defaultType": "number", + "defaultValue": 0.03 + }, + "sklearn/sklearn.feature_selection._univariate_selection/f_regression/center": { + "target": "sklearn/sklearn.feature_selection._univariate_selection/f_regression/center", "defaultType": "boolean", "defaultValue": true }, - "sklearn/sklearn.metrics.pairwise/euclidean_distances/Y_norm_squared": { - "target": "sklearn/sklearn.metrics.pairwise/euclidean_distances/Y_norm_squared", + "sklearn/sklearn.feature_selection._univariate_selection/f_regression/force_finite": { + "target": "sklearn/sklearn.feature_selection._univariate_selection/f_regression/force_finite", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.gaussian_process._gpc/GaussianProcessClassifier/__init__/copy_X_train": { + "target": "sklearn/sklearn.gaussian_process._gpc/GaussianProcessClassifier/__init__/copy_X_train", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.gaussian_process._gpc/GaussianProcessClassifier/__init__/multi_class": { + "target": "sklearn/sklearn.gaussian_process._gpc/GaussianProcessClassifier/__init__/multi_class", + "defaultType": "string", + "defaultValue": "one_vs_rest" + }, + "sklearn/sklearn.gaussian_process._gpr/GaussianProcessRegressor/__init__/alpha": { + "target": "sklearn/sklearn.gaussian_process._gpr/GaussianProcessRegressor/__init__/alpha", + "defaultType": "number", + "defaultValue": 1e-10 + }, + "sklearn/sklearn.gaussian_process._gpr/GaussianProcessRegressor/__init__/optimizer": { + "target": "sklearn/sklearn.gaussian_process._gpr/GaussianProcessRegressor/__init__/optimizer", + "defaultType": "string", + "defaultValue": "fmin_l_bfgs_b" + }, + "sklearn/sklearn.gaussian_process._gpr/GaussianProcessRegressor/__init__/normalize_y": { + "target": "sklearn/sklearn.gaussian_process._gpr/GaussianProcessRegressor/__init__/normalize_y", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.gaussian_process._gpr/GaussianProcessRegressor/__init__/copy_X_train": { + "target": "sklearn/sklearn.gaussian_process._gpr/GaussianProcessRegressor/__init__/copy_X_train", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.gaussian_process._gpr/GaussianProcessRegressor/log_marginal_likelihood/theta": { + "target": "sklearn/sklearn.gaussian_process._gpr/GaussianProcessRegressor/log_marginal_likelihood/theta", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.metrics.pairwise/euclidean_distances/squared": { - "target": "sklearn/sklearn.metrics.pairwise/euclidean_distances/squared", + 
"sklearn/sklearn.gaussian_process._gpr/GaussianProcessRegressor/log_marginal_likelihood/eval_gradient": { + "target": "sklearn/sklearn.gaussian_process._gpr/GaussianProcessRegressor/log_marginal_likelihood/eval_gradient", "defaultType": "boolean", "defaultValue": false }, - "sklearn/sklearn.metrics.pairwise/euclidean_distances/X_norm_squared": { - "target": "sklearn/sklearn.metrics.pairwise/euclidean_distances/X_norm_squared", + "sklearn/sklearn.gaussian_process._gpr/GaussianProcessRegressor/log_marginal_likelihood/clone_kernel": { + "target": "sklearn/sklearn.gaussian_process._gpr/GaussianProcessRegressor/log_marginal_likelihood/clone_kernel", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.gaussian_process._gpr/GaussianProcessRegressor/predict/return_std": { + "target": "sklearn/sklearn.gaussian_process._gpr/GaussianProcessRegressor/predict/return_std", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.gaussian_process._gpr/GaussianProcessRegressor/predict/return_cov": { + "target": "sklearn/sklearn.gaussian_process._gpr/GaussianProcessRegressor/predict/return_cov", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.gaussian_process.kernels/ConstantKernel/__init__/constant_value": { + "target": "sklearn/sklearn.gaussian_process.kernels/ConstantKernel/__init__/constant_value", + "defaultType": "number", + "defaultValue": 1.0 + }, + "sklearn/sklearn.gaussian_process.kernels/DotProduct/__init__/sigma_0": { + "target": "sklearn/sklearn.gaussian_process.kernels/DotProduct/__init__/sigma_0", + "defaultType": "number", + "defaultValue": 1.0 + }, + "sklearn/sklearn.gaussian_process.kernels/Matern/__init__/length_scale": { + "target": "sklearn/sklearn.gaussian_process.kernels/Matern/__init__/length_scale", + "defaultType": "number", + "defaultValue": 1.0 + }, + "sklearn/sklearn.gaussian_process.kernels/Matern/__init__/nu": { + "target": "sklearn/sklearn.gaussian_process.kernels/Matern/__init__/nu", + "defaultType": "number", + "defaultValue": 1.5 + }, + "sklearn/sklearn.gaussian_process.kernels/RationalQuadratic/__init__/length_scale": { + "target": "sklearn/sklearn.gaussian_process.kernels/RationalQuadratic/__init__/length_scale", + "defaultType": "number", + "defaultValue": 1.0 + }, + "sklearn/sklearn.gaussian_process.kernels/RationalQuadratic/__init__/alpha": { + "target": "sklearn/sklearn.gaussian_process.kernels/RationalQuadratic/__init__/alpha", + "defaultType": "number", + "defaultValue": 1.0 + }, + "sklearn/sklearn.impute._base/MissingIndicator/__init__/features": { + "target": "sklearn/sklearn.impute._base/MissingIndicator/__init__/features", + "defaultType": "string", + "defaultValue": "missing-only" + }, + "sklearn/sklearn.impute._base/MissingIndicator/__init__/sparse": { + "target": "sklearn/sklearn.impute._base/MissingIndicator/__init__/sparse", + "defaultType": "string", + "defaultValue": "auto" + }, + "sklearn/sklearn.impute._base/MissingIndicator/__init__/error_on_new": { + "target": "sklearn/sklearn.impute._base/MissingIndicator/__init__/error_on_new", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.impute._base/MissingIndicator/fit_transform/y": { + "target": "sklearn/sklearn.impute._base/MissingIndicator/fit_transform/y", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.metrics.pairwise/linear_kernel/dense_output": { - "target": "sklearn/sklearn.metrics.pairwise/linear_kernel/dense_output", + 
"sklearn/sklearn.impute._iterative/IterativeImputer/__init__/verbose": { + "target": "sklearn/sklearn.impute._iterative/IterativeImputer/__init__/verbose", + "defaultType": "number", + "defaultValue": 0.0 + }, + "sklearn/sklearn.impute._iterative/IterativeImputer/fit/y": { + "target": "sklearn/sklearn.impute._iterative/IterativeImputer/fit/y", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.impute._iterative/IterativeImputer/fit_transform/y": { + "target": "sklearn/sklearn.impute._iterative/IterativeImputer/fit_transform/y", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.impute._knn/KNNImputer/__init__/metric": { + "target": "sklearn/sklearn.impute._knn/KNNImputer/__init__/metric", + "defaultType": "string", + "defaultValue": "nan_euclidean" + }, + "sklearn/sklearn.impute._knn/KNNImputer/__init__/copy": { + "target": "sklearn/sklearn.impute._knn/KNNImputer/__init__/copy", "defaultType": "boolean", "defaultValue": true }, - "sklearn/sklearn.metrics.pairwise/manhattan_distances/sum_over_features": { - "target": "sklearn/sklearn.metrics.pairwise/manhattan_distances/sum_over_features", + "sklearn/sklearn.impute._knn/KNNImputer/__init__/add_indicator": { + "target": "sklearn/sklearn.impute._knn/KNNImputer/__init__/add_indicator", "defaultType": "boolean", - "defaultValue": true + "defaultValue": false }, - "sklearn/sklearn.metrics.pairwise/paired_distances/metric": { - "target": "sklearn/sklearn.metrics.pairwise/paired_distances/metric", + "sklearn/sklearn.impute._knn/KNNImputer/fit/y": { + "target": "sklearn/sklearn.impute._knn/KNNImputer/fit/y", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.inspection._partial_dependence/partial_dependence/response_method": { + "target": "sklearn/sklearn.inspection._partial_dependence/partial_dependence/response_method", "defaultType": "string", - "defaultValue": "euclidean" + "defaultValue": "auto" }, - "sklearn/sklearn.metrics.pairwise/pairwise_distances/force_all_finite": { - "target": "sklearn/sklearn.metrics.pairwise/pairwise_distances/force_all_finite", + "sklearn/sklearn.inspection._partial_dependence/partial_dependence/grid_resolution": { + "target": "sklearn/sklearn.inspection._partial_dependence/partial_dependence/grid_resolution", + "defaultType": "number", + "defaultValue": 50.0 + }, + "sklearn/sklearn.inspection._partial_dependence/partial_dependence/method": { + "target": "sklearn/sklearn.inspection._partial_dependence/partial_dependence/method", + "defaultType": "string", + "defaultValue": "auto" + }, + "sklearn/sklearn.inspection._partial_dependence/partial_dependence/kind": { + "target": "sklearn/sklearn.inspection._partial_dependence/partial_dependence/kind", + "defaultType": "string", + "defaultValue": "average" + }, + "sklearn/sklearn.inspection._permutation_importance/permutation_importance/sample_weight": { + "target": "sklearn/sklearn.inspection._permutation_importance/permutation_importance/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.inspection._permutation_importance/permutation_importance/max_samples": { + "target": "sklearn/sklearn.inspection._permutation_importance/permutation_importance/max_samples", + "defaultType": "number", + "defaultValue": 1.0 + }, + "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/feature_names": { + "target": "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/feature_names", + "defaultType": "none", + "defaultValue": null + }, + 
"sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/response_method": { + "target": "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/response_method", + "defaultType": "string", + "defaultValue": "auto" + }, + "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/n_cols": { + "target": "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/n_cols", + "defaultType": "number", + "defaultValue": 3.0 + }, + "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/method": { + "target": "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/method", + "defaultType": "string", + "defaultValue": "auto" + }, + "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/n_jobs": { + "target": "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/n_jobs", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/verbose": { + "target": "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/verbose", + "defaultType": "number", + "defaultValue": 0.0 + }, + "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/line_kw": { + "target": "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/line_kw", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/ice_lines_kw": { + "target": "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/ice_lines_kw", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/pd_line_kw": { + "target": "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/pd_line_kw", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/contour_kw": { + "target": "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/contour_kw", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/kind": { + "target": "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/kind", + "defaultType": "string", + "defaultValue": "average" + }, + "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/subsample": { + "target": "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/subsample", + "defaultType": "number", + "defaultValue": 1000.0 + }, + "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/random_state": { + "target": "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/random_state", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/centered": { + "target": "sklearn/sklearn.inspection._plot.partial_dependence/plot_partial_dependence/centered", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.isotonic/IsotonicRegression/__init__/increasing": { + "target": "sklearn/sklearn.isotonic/IsotonicRegression/__init__/increasing", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.isotonic/IsotonicRegression/fit/sample_weight": { + "target": 
"sklearn/sklearn.isotonic/IsotonicRegression/fit/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.kernel_approximation/Nystroem/__init__/kernel": { + "target": "sklearn/sklearn.kernel_approximation/Nystroem/__init__/kernel", + "defaultType": "string", + "defaultValue": "rbf" + }, + "sklearn/sklearn.kernel_approximation/Nystroem/__init__/gamma": { + "target": "sklearn/sklearn.kernel_approximation/Nystroem/__init__/gamma", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.kernel_approximation/Nystroem/__init__/coef0": { + "target": "sklearn/sklearn.kernel_approximation/Nystroem/__init__/coef0", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.kernel_approximation/Nystroem/__init__/degree": { + "target": "sklearn/sklearn.kernel_approximation/Nystroem/__init__/degree", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.kernel_approximation/Nystroem/__init__/kernel_params": { + "target": "sklearn/sklearn.kernel_approximation/Nystroem/__init__/kernel_params", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.kernel_approximation/Nystroem/__init__/n_components": { + "target": "sklearn/sklearn.kernel_approximation/Nystroem/__init__/n_components", + "defaultType": "number", + "defaultValue": 100.0 + }, + "sklearn/sklearn.kernel_approximation/Nystroem/__init__/n_jobs": { + "target": "sklearn/sklearn.kernel_approximation/Nystroem/__init__/n_jobs", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.kernel_approximation/RBFSampler/__init__/n_components": { + "target": "sklearn/sklearn.kernel_approximation/RBFSampler/__init__/n_components", + "defaultType": "number", + "defaultValue": 100.0 + }, + "sklearn/sklearn.kernel_ridge/KernelRidge/__init__/kernel_params": { + "target": "sklearn/sklearn.kernel_ridge/KernelRidge/__init__/kernel_params", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.kernel_ridge/KernelRidge/fit/sample_weight": { + "target": "sklearn/sklearn.kernel_ridge/KernelRidge/fit/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.linear_model._bayes/ARDRegression/__init__/tol": { + "target": "sklearn/sklearn.linear_model._bayes/ARDRegression/__init__/tol", + "defaultType": "number", + "defaultValue": 0.001 + }, + "sklearn/sklearn.linear_model._bayes/ARDRegression/__init__/threshold_lambda": { + "target": "sklearn/sklearn.linear_model._bayes/ARDRegression/__init__/threshold_lambda", + "defaultType": "number", + "defaultValue": 10000.0 + }, + "sklearn/sklearn.linear_model._bayes/ARDRegression/__init__/copy_X": { + "target": "sklearn/sklearn.linear_model._bayes/ARDRegression/__init__/copy_X", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.linear_model._bayes/BayesianRidge/__init__/copy_X": { + "target": "sklearn/sklearn.linear_model._bayes/BayesianRidge/__init__/copy_X", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.linear_model._bayes/BayesianRidge/fit/sample_weight": { + "target": "sklearn/sklearn.linear_model._bayes/BayesianRidge/fit/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.linear_model._coordinate_descent/ElasticNet/__init__/copy_X": { + "target": "sklearn/sklearn.linear_model._coordinate_descent/ElasticNet/__init__/copy_X", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.linear_model._coordinate_descent/ElasticNet/fit/check_input": { + "target": 
"sklearn/sklearn.linear_model._coordinate_descent/ElasticNet/fit/check_input", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.linear_model._coordinate_descent/ElasticNetCV/__init__/fit_intercept": { + "target": "sklearn/sklearn.linear_model._coordinate_descent/ElasticNetCV/__init__/fit_intercept", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.linear_model._coordinate_descent/ElasticNetCV/__init__/copy_X": { + "target": "sklearn/sklearn.linear_model._coordinate_descent/ElasticNetCV/__init__/copy_X", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.linear_model._coordinate_descent/ElasticNetCV/__init__/selection": { + "target": "sklearn/sklearn.linear_model._coordinate_descent/ElasticNetCV/__init__/selection", + "defaultType": "string", + "defaultValue": "cyclic" + }, + "sklearn/sklearn.linear_model._coordinate_descent/Lasso/__init__/copy_X": { + "target": "sklearn/sklearn.linear_model._coordinate_descent/Lasso/__init__/copy_X", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.linear_model._coordinate_descent/LassoCV/__init__/eps": { + "target": "sklearn/sklearn.linear_model._coordinate_descent/LassoCV/__init__/eps", + "defaultType": "number", + "defaultValue": 0.001 + }, + "sklearn/sklearn.linear_model._coordinate_descent/LassoCV/__init__/n_alphas": { + "target": "sklearn/sklearn.linear_model._coordinate_descent/LassoCV/__init__/n_alphas", + "defaultType": "number", + "defaultValue": 100.0 + }, + "sklearn/sklearn.linear_model._coordinate_descent/LassoCV/__init__/precompute": { + "target": "sklearn/sklearn.linear_model._coordinate_descent/LassoCV/__init__/precompute", + "defaultType": "string", + "defaultValue": "auto" + }, + "sklearn/sklearn.linear_model._coordinate_descent/LassoCV/__init__/copy_X": { + "target": "sklearn/sklearn.linear_model._coordinate_descent/LassoCV/__init__/copy_X", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.linear_model._coordinate_descent/LinearModelCV/fit/sample_weight": { + "target": "sklearn/sklearn.linear_model._coordinate_descent/LinearModelCV/fit/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNet/__init__/alpha": { + "target": "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNet/__init__/alpha", + "defaultType": "number", + "defaultValue": 0.001 + }, + "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNet/__init__/l1_ratio": { + "target": "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNet/__init__/l1_ratio", + "defaultType": "number", + "defaultValue": 0.5 + }, + "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNet/__init__/fit_intercept": { + "target": "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNet/__init__/fit_intercept", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNet/__init__/normalize": { + "target": "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNet/__init__/normalize", + "defaultType": "string", + "defaultValue": "deprecated" + }, + "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNet/__init__/copy_X": { + "target": "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNet/__init__/copy_X", + "defaultType": "boolean", + "defaultValue": true + }, + 
"sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNet/__init__/max_iter": { + "target": "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNet/__init__/max_iter", + "defaultType": "number", + "defaultValue": 1000.0 + }, + "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNet/__init__/tol": { + "target": "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNet/__init__/tol", + "defaultType": "number", + "defaultValue": 0.0001 + }, + "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNet/__init__/warm_start": { + "target": "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNet/__init__/warm_start", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNetCV/__init__/l1_ratio": { + "target": "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNetCV/__init__/l1_ratio", + "defaultType": "number", + "defaultValue": 0.5 + }, + "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNetCV/__init__/eps": { + "target": "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNetCV/__init__/eps", + "defaultType": "number", + "defaultValue": 0.001 + }, + "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNetCV/__init__/n_alphas": { + "target": "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNetCV/__init__/n_alphas", + "defaultType": "number", + "defaultValue": 100.0 + }, + "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNetCV/__init__/alphas": { + "target": "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNetCV/__init__/alphas", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNetCV/__init__/fit_intercept": { + "target": "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNetCV/__init__/fit_intercept", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNetCV/__init__/normalize": { + "target": "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNetCV/__init__/normalize", + "defaultType": "string", + "defaultValue": "deprecated" + }, + "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNetCV/__init__/max_iter": { + "target": "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNetCV/__init__/max_iter", + "defaultType": "number", + "defaultValue": 1000.0 + }, + "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNetCV/__init__/tol": { + "target": "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNetCV/__init__/tol", + "defaultType": "number", + "defaultValue": 0.0001 + }, + "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNetCV/__init__/cv": { + "target": "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNetCV/__init__/cv", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNetCV/__init__/copy_X": { + "target": "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNetCV/__init__/copy_X", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNetCV/__init__/verbose": { + "target": "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNetCV/__init__/verbose", + "defaultType": "number", + "defaultValue": 0.0 + }, + 
"sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNetCV/__init__/n_jobs": { + "target": "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNetCV/__init__/n_jobs", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNetCV/__init__/random_state": { + "target": "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNetCV/__init__/random_state", + "defaultType": "number", + "defaultValue": 17.0 + }, + "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNetCV/__init__/selection": { + "target": "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNetCV/__init__/selection", + "defaultType": "string", + "defaultValue": "cyclic" + }, + "sklearn/sklearn.linear_model._glm.glm/GammaRegressor/__init__/alpha": { + "target": "sklearn/sklearn.linear_model._glm.glm/GammaRegressor/__init__/alpha", + "defaultType": "number", + "defaultValue": 1.0 + }, + "sklearn/sklearn.linear_model._glm.glm/GammaRegressor/__init__/fit_intercept": { + "target": "sklearn/sklearn.linear_model._glm.glm/GammaRegressor/__init__/fit_intercept", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.linear_model._glm.glm/GammaRegressor/__init__/max_iter": { + "target": "sklearn/sklearn.linear_model._glm.glm/GammaRegressor/__init__/max_iter", + "defaultType": "number", + "defaultValue": 100.0 + }, + "sklearn/sklearn.linear_model._glm.glm/GammaRegressor/__init__/tol": { + "target": "sklearn/sklearn.linear_model._glm.glm/GammaRegressor/__init__/tol", + "defaultType": "number", + "defaultValue": 0.0001 + }, + "sklearn/sklearn.linear_model._glm.glm/GammaRegressor/__init__/warm_start": { + "target": "sklearn/sklearn.linear_model._glm.glm/GammaRegressor/__init__/warm_start", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.linear_model._glm.glm/GammaRegressor/__init__/verbose": { + "target": "sklearn/sklearn.linear_model._glm.glm/GammaRegressor/__init__/verbose", + "defaultType": "number", + "defaultValue": 0.0 + }, + "sklearn/sklearn.linear_model._glm.glm/PoissonRegressor/__init__/alpha": { + "target": "sklearn/sklearn.linear_model._glm.glm/PoissonRegressor/__init__/alpha", + "defaultType": "number", + "defaultValue": 1.0 + }, + "sklearn/sklearn.linear_model._glm.glm/PoissonRegressor/__init__/fit_intercept": { + "target": "sklearn/sklearn.linear_model._glm.glm/PoissonRegressor/__init__/fit_intercept", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.linear_model._glm.glm/PoissonRegressor/__init__/max_iter": { + "target": "sklearn/sklearn.linear_model._glm.glm/PoissonRegressor/__init__/max_iter", + "defaultType": "number", + "defaultValue": 100.0 + }, + "sklearn/sklearn.linear_model._glm.glm/PoissonRegressor/__init__/tol": { + "target": "sklearn/sklearn.linear_model._glm.glm/PoissonRegressor/__init__/tol", + "defaultType": "number", + "defaultValue": 0.0001 + }, + "sklearn/sklearn.linear_model._glm.glm/PoissonRegressor/__init__/warm_start": { + "target": "sklearn/sklearn.linear_model._glm.glm/PoissonRegressor/__init__/warm_start", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.linear_model._glm.glm/PoissonRegressor/__init__/verbose": { + "target": "sklearn/sklearn.linear_model._glm.glm/PoissonRegressor/__init__/verbose", + "defaultType": "number", + "defaultValue": 0.0 + }, + "sklearn/sklearn.linear_model._glm.glm/TweedieRegressor/__init__/fit_intercept": { + "target": 
"sklearn/sklearn.linear_model._glm.glm/TweedieRegressor/__init__/fit_intercept", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.linear_model._glm.glm/TweedieRegressor/__init__/tol": { + "target": "sklearn/sklearn.linear_model._glm.glm/TweedieRegressor/__init__/tol", + "defaultType": "number", + "defaultValue": 0.0001 + }, + "sklearn/sklearn.linear_model._glm.glm/TweedieRegressor/__init__/verbose": { + "target": "sklearn/sklearn.linear_model._glm.glm/TweedieRegressor/__init__/verbose", + "defaultType": "number", + "defaultValue": 0.0 + }, + "sklearn/sklearn.linear_model._huber/HuberRegressor/__init__/warm_start": { + "target": "sklearn/sklearn.linear_model._huber/HuberRegressor/__init__/warm_start", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.linear_model._huber/HuberRegressor/__init__/tol": { + "target": "sklearn/sklearn.linear_model._huber/HuberRegressor/__init__/tol", + "defaultType": "number", + "defaultValue": 1e-5 + }, + "sklearn/sklearn.linear_model._huber/HuberRegressor/fit/sample_weight": { + "target": "sklearn/sklearn.linear_model._huber/HuberRegressor/fit/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.linear_model._least_angle/Lars/__init__/verbose": { + "target": "sklearn/sklearn.linear_model._least_angle/Lars/__init__/verbose", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.linear_model._least_angle/Lars/__init__/normalize": { + "target": "sklearn/sklearn.linear_model._least_angle/Lars/__init__/normalize", + "defaultType": "string", + "defaultValue": "deprecated" + }, + "sklearn/sklearn.linear_model._least_angle/Lars/__init__/precompute": { + "target": "sklearn/sklearn.linear_model._least_angle/Lars/__init__/precompute", + "defaultType": "string", + "defaultValue": "auto" + }, + "sklearn/sklearn.linear_model._least_angle/Lars/__init__/copy_X": { + "target": "sklearn/sklearn.linear_model._least_angle/Lars/__init__/copy_X", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.linear_model._least_angle/Lars/__init__/fit_path": { + "target": "sklearn/sklearn.linear_model._least_angle/Lars/__init__/fit_path", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.linear_model._least_angle/Lars/__init__/jitter": { + "target": "sklearn/sklearn.linear_model._least_angle/Lars/__init__/jitter", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.linear_model._least_angle/Lars/__init__/random_state": { + "target": "sklearn/sklearn.linear_model._least_angle/Lars/__init__/random_state", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.linear_model._least_angle/Lars/fit/Xy": { + "target": "sklearn/sklearn.linear_model._least_angle/Lars/fit/Xy", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.linear_model._least_angle/LarsCV/__init__/fit_intercept": { + "target": "sklearn/sklearn.linear_model._least_angle/LarsCV/__init__/fit_intercept", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.linear_model._least_angle/LarsCV/__init__/verbose": { + "target": "sklearn/sklearn.linear_model._least_angle/LarsCV/__init__/verbose", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.linear_model._least_angle/LarsCV/__init__/max_iter": { + "target": "sklearn/sklearn.linear_model._least_angle/LarsCV/__init__/max_iter", + "defaultType": "number", + "defaultValue": 500.0 + }, + 
"sklearn/sklearn.linear_model._least_angle/LarsCV/__init__/normalize": { + "target": "sklearn/sklearn.linear_model._least_angle/LarsCV/__init__/normalize", + "defaultType": "string", + "defaultValue": "deprecated" + }, + "sklearn/sklearn.linear_model._least_angle/LarsCV/__init__/precompute": { + "target": "sklearn/sklearn.linear_model._least_angle/LarsCV/__init__/precompute", + "defaultType": "string", + "defaultValue": "auto" + }, + "sklearn/sklearn.linear_model._least_angle/LarsCV/__init__/cv": { + "target": "sklearn/sklearn.linear_model._least_angle/LarsCV/__init__/cv", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.linear_model._least_angle/LarsCV/__init__/max_n_alphas": { + "target": "sklearn/sklearn.linear_model._least_angle/LarsCV/__init__/max_n_alphas", + "defaultType": "number", + "defaultValue": 1000.0 + }, + "sklearn/sklearn.linear_model._least_angle/LarsCV/__init__/n_jobs": { + "target": "sklearn/sklearn.linear_model._least_angle/LarsCV/__init__/n_jobs", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.linear_model._least_angle/LarsCV/__init__/copy_X": { + "target": "sklearn/sklearn.linear_model._least_angle/LarsCV/__init__/copy_X", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.linear_model._least_angle/LassoLars/__init__/verbose": { + "target": "sklearn/sklearn.linear_model._least_angle/LassoLars/__init__/verbose", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.linear_model._least_angle/LassoLars/__init__/copy_X": { + "target": "sklearn/sklearn.linear_model._least_angle/LassoLars/__init__/copy_X", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.linear_model._least_angle/LassoLars/__init__/fit_path": { + "target": "sklearn/sklearn.linear_model._least_angle/LassoLars/__init__/fit_path", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.linear_model._least_angle/LassoLars/__init__/positive": { + "target": "sklearn/sklearn.linear_model._least_angle/LassoLars/__init__/positive", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.linear_model._least_angle/LassoLars/__init__/jitter": { + "target": "sklearn/sklearn.linear_model._least_angle/LassoLars/__init__/jitter", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.linear_model._least_angle/LassoLars/__init__/random_state": { + "target": "sklearn/sklearn.linear_model._least_angle/LassoLars/__init__/random_state", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.linear_model._least_angle/LassoLarsCV/__init__/fit_intercept": { + "target": "sklearn/sklearn.linear_model._least_angle/LassoLarsCV/__init__/fit_intercept", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.linear_model._least_angle/LassoLarsCV/__init__/verbose": { + "target": "sklearn/sklearn.linear_model._least_angle/LassoLarsCV/__init__/verbose", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.linear_model._least_angle/LassoLarsCV/__init__/copy_X": { + "target": "sklearn/sklearn.linear_model._least_angle/LassoLarsCV/__init__/copy_X", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.linear_model._least_angle/LassoLarsCV/__init__/positive": { + "target": "sklearn/sklearn.linear_model._least_angle/LassoLarsCV/__init__/positive", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.linear_model._least_angle/LassoLarsIC/__init__/verbose": { + "target": 
"sklearn/sklearn.linear_model._least_angle/LassoLarsIC/__init__/verbose", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.linear_model._least_angle/LassoLarsIC/__init__/normalize": { + "target": "sklearn/sklearn.linear_model._least_angle/LassoLarsIC/__init__/normalize", + "defaultType": "string", + "defaultValue": "deprecated" + }, + "sklearn/sklearn.linear_model._least_angle/LassoLarsIC/__init__/precompute": { + "target": "sklearn/sklearn.linear_model._least_angle/LassoLarsIC/__init__/precompute", + "defaultType": "string", + "defaultValue": "auto" + }, + "sklearn/sklearn.linear_model._least_angle/LassoLarsIC/__init__/max_iter": { + "target": "sklearn/sklearn.linear_model._least_angle/LassoLarsIC/__init__/max_iter", + "defaultType": "number", + "defaultValue": 500.0 + }, + "sklearn/sklearn.linear_model._least_angle/LassoLarsIC/__init__/copy_X": { + "target": "sklearn/sklearn.linear_model._least_angle/LassoLarsIC/__init__/copy_X", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.linear_model._least_angle/LassoLarsIC/__init__/positive": { + "target": "sklearn/sklearn.linear_model._least_angle/LassoLarsIC/__init__/positive", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.linear_model._least_angle/LassoLarsIC/__init__/noise_variance": { + "target": "sklearn/sklearn.linear_model._least_angle/LassoLarsIC/__init__/noise_variance", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.linear_model._least_angle/LassoLarsIC/fit/copy_X": { + "target": "sklearn/sklearn.linear_model._least_angle/LassoLarsIC/fit/copy_X", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.linear_model._logistic/LogisticRegressionCV/__init__/fit_intercept": { + "target": "sklearn/sklearn.linear_model._logistic/LogisticRegressionCV/__init__/fit_intercept", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.linear_model._logistic/LogisticRegressionCV/__init__/dual": { + "target": "sklearn/sklearn.linear_model._logistic/LogisticRegressionCV/__init__/dual", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.linear_model._logistic/LogisticRegressionCV/__init__/intercept_scaling": { + "target": "sklearn/sklearn.linear_model._logistic/LogisticRegressionCV/__init__/intercept_scaling", + "defaultType": "number", + "defaultValue": 1.0 + }, + "sklearn/sklearn.linear_model._logistic/LogisticRegressionCV/__init__/l1_ratios": { + "target": "sklearn/sklearn.linear_model._logistic/LogisticRegressionCV/__init__/l1_ratios", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.linear_model._logistic/LogisticRegressionCV/fit/sample_weight": { + "target": "sklearn/sklearn.linear_model._logistic/LogisticRegressionCV/fit/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.linear_model._logistic/LogisticRegressionCV/score/sample_weight": { + "target": "sklearn/sklearn.linear_model._logistic/LogisticRegressionCV/score/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.linear_model._omp/OrthogonalMatchingPursuit/__init__/tol": { + "target": "sklearn/sklearn.linear_model._omp/OrthogonalMatchingPursuit/__init__/tol", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.linear_model._omp/OrthogonalMatchingPursuit/__init__/fit_intercept": { + "target": "sklearn/sklearn.linear_model._omp/OrthogonalMatchingPursuit/__init__/fit_intercept", + "defaultType": "boolean", + 
"defaultValue": true + }, + "sklearn/sklearn.linear_model._omp/OrthogonalMatchingPursuitCV/__init__/copy": { + "target": "sklearn/sklearn.linear_model._omp/OrthogonalMatchingPursuitCV/__init__/copy", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.linear_model._omp/OrthogonalMatchingPursuitCV/__init__/fit_intercept": { + "target": "sklearn/sklearn.linear_model._omp/OrthogonalMatchingPursuitCV/__init__/fit_intercept", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.linear_model._omp/OrthogonalMatchingPursuitCV/__init__/normalize": { + "target": "sklearn/sklearn.linear_model._omp/OrthogonalMatchingPursuitCV/__init__/normalize", + "defaultType": "string", + "defaultValue": "deprecated" + }, + "sklearn/sklearn.linear_model._omp/OrthogonalMatchingPursuitCV/__init__/max_iter": { + "target": "sklearn/sklearn.linear_model._omp/OrthogonalMatchingPursuitCV/__init__/max_iter", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.linear_model._omp/OrthogonalMatchingPursuitCV/__init__/cv": { + "target": "sklearn/sklearn.linear_model._omp/OrthogonalMatchingPursuitCV/__init__/cv", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.linear_model._omp/OrthogonalMatchingPursuitCV/__init__/n_jobs": { + "target": "sklearn/sklearn.linear_model._omp/OrthogonalMatchingPursuitCV/__init__/n_jobs", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.linear_model._omp/OrthogonalMatchingPursuitCV/__init__/verbose": { + "target": "sklearn/sklearn.linear_model._omp/OrthogonalMatchingPursuitCV/__init__/verbose", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.linear_model._passive_aggressive/PassiveAggressiveClassifier/__init__/C": { + "target": "sklearn/sklearn.linear_model._passive_aggressive/PassiveAggressiveClassifier/__init__/C", + "defaultType": "number", + "defaultValue": 1.0 + }, + "sklearn/sklearn.linear_model._passive_aggressive/PassiveAggressiveClassifier/__init__/fit_intercept": { + "target": "sklearn/sklearn.linear_model._passive_aggressive/PassiveAggressiveClassifier/__init__/fit_intercept", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.linear_model._passive_aggressive/PassiveAggressiveClassifier/__init__/tol": { + "target": "sklearn/sklearn.linear_model._passive_aggressive/PassiveAggressiveClassifier/__init__/tol", + "defaultType": "number", + "defaultValue": 0.001 + }, + "sklearn/sklearn.linear_model._passive_aggressive/PassiveAggressiveClassifier/__init__/validation_fraction": { + "target": "sklearn/sklearn.linear_model._passive_aggressive/PassiveAggressiveClassifier/__init__/validation_fraction", + "defaultType": "number", + "defaultValue": 0.1 + }, + "sklearn/sklearn.linear_model._passive_aggressive/PassiveAggressiveClassifier/__init__/shuffle": { + "target": "sklearn/sklearn.linear_model._passive_aggressive/PassiveAggressiveClassifier/__init__/shuffle", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.linear_model._passive_aggressive/PassiveAggressiveClassifier/__init__/warm_start": { + "target": "sklearn/sklearn.linear_model._passive_aggressive/PassiveAggressiveClassifier/__init__/warm_start", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.linear_model._passive_aggressive/PassiveAggressiveClassifier/fit/coef_init": { + "target": "sklearn/sklearn.linear_model._passive_aggressive/PassiveAggressiveClassifier/fit/coef_init", + "defaultType": "none", + "defaultValue": null + }, + 
"sklearn/sklearn.linear_model._passive_aggressive/PassiveAggressiveClassifier/fit/intercept_init": { + "target": "sklearn/sklearn.linear_model._passive_aggressive/PassiveAggressiveClassifier/fit/intercept_init", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.linear_model._passive_aggressive/PassiveAggressiveRegressor/__init__/fit_intercept": { + "target": "sklearn/sklearn.linear_model._passive_aggressive/PassiveAggressiveRegressor/__init__/fit_intercept", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.linear_model._passive_aggressive/PassiveAggressiveRegressor/__init__/max_iter": { + "target": "sklearn/sklearn.linear_model._passive_aggressive/PassiveAggressiveRegressor/__init__/max_iter", + "defaultType": "number", + "defaultValue": 1000.0 + }, + "sklearn/sklearn.linear_model._passive_aggressive/PassiveAggressiveRegressor/__init__/early_stopping": { + "target": "sklearn/sklearn.linear_model._passive_aggressive/PassiveAggressiveRegressor/__init__/early_stopping", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.linear_model._passive_aggressive/PassiveAggressiveRegressor/__init__/validation_fraction": { + "target": "sklearn/sklearn.linear_model._passive_aggressive/PassiveAggressiveRegressor/__init__/validation_fraction", + "defaultType": "number", + "defaultValue": 0.1 + }, + "sklearn/sklearn.linear_model._passive_aggressive/PassiveAggressiveRegressor/__init__/n_iter_no_change": { + "target": "sklearn/sklearn.linear_model._passive_aggressive/PassiveAggressiveRegressor/__init__/n_iter_no_change", + "defaultType": "number", + "defaultValue": 5.0 + }, + "sklearn/sklearn.linear_model._passive_aggressive/PassiveAggressiveRegressor/__init__/shuffle": { + "target": "sklearn/sklearn.linear_model._passive_aggressive/PassiveAggressiveRegressor/__init__/shuffle", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.linear_model._passive_aggressive/PassiveAggressiveRegressor/fit/coef_init": { + "target": "sklearn/sklearn.linear_model._passive_aggressive/PassiveAggressiveRegressor/fit/coef_init", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.linear_model._passive_aggressive/PassiveAggressiveRegressor/fit/intercept_init": { + "target": "sklearn/sklearn.linear_model._passive_aggressive/PassiveAggressiveRegressor/fit/intercept_init", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.linear_model._perceptron/Perceptron/__init__/penalty": { + "target": "sklearn/sklearn.linear_model._perceptron/Perceptron/__init__/penalty", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.linear_model._perceptron/Perceptron/__init__/alpha": { + "target": "sklearn/sklearn.linear_model._perceptron/Perceptron/__init__/alpha", + "defaultType": "number", + "defaultValue": 0.0001 + }, + "sklearn/sklearn.linear_model._perceptron/Perceptron/__init__/l1_ratio": { + "target": "sklearn/sklearn.linear_model._perceptron/Perceptron/__init__/l1_ratio", + "defaultType": "number", + "defaultValue": 0.15 + }, + "sklearn/sklearn.linear_model._perceptron/Perceptron/__init__/fit_intercept": { + "target": "sklearn/sklearn.linear_model._perceptron/Perceptron/__init__/fit_intercept", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.linear_model._perceptron/Perceptron/__init__/shuffle": { + "target": "sklearn/sklearn.linear_model._perceptron/Perceptron/__init__/shuffle", + "defaultType": "boolean", + "defaultValue": true + }, + 
"sklearn/sklearn.linear_model._perceptron/Perceptron/__init__/verbose": { + "target": "sklearn/sklearn.linear_model._perceptron/Perceptron/__init__/verbose", + "defaultType": "number", + "defaultValue": 0.0 + }, + "sklearn/sklearn.linear_model._perceptron/Perceptron/__init__/early_stopping": { + "target": "sklearn/sklearn.linear_model._perceptron/Perceptron/__init__/early_stopping", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.linear_model._perceptron/Perceptron/__init__/validation_fraction": { + "target": "sklearn/sklearn.linear_model._perceptron/Perceptron/__init__/validation_fraction", + "defaultType": "number", + "defaultValue": 0.1 + }, + "sklearn/sklearn.linear_model._perceptron/Perceptron/__init__/n_iter_no_change": { + "target": "sklearn/sklearn.linear_model._perceptron/Perceptron/__init__/n_iter_no_change", + "defaultType": "number", + "defaultValue": 5.0 + }, + "sklearn/sklearn.linear_model._perceptron/Perceptron/__init__/warm_start": { + "target": "sklearn/sklearn.linear_model._perceptron/Perceptron/__init__/warm_start", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.linear_model._ransac/RANSACRegressor/__init__/is_data_valid": { + "target": "sklearn/sklearn.linear_model._ransac/RANSACRegressor/__init__/is_data_valid", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.linear_model._ransac/RANSACRegressor/__init__/is_model_valid": { + "target": "sklearn/sklearn.linear_model._ransac/RANSACRegressor/__init__/is_model_valid", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.linear_model._ransac/RANSACRegressor/__init__/max_trials": { + "target": "sklearn/sklearn.linear_model._ransac/RANSACRegressor/__init__/max_trials", + "defaultType": "number", + "defaultValue": 100.0 + }, + "sklearn/sklearn.linear_model._ransac/RANSACRegressor/__init__/stop_probability": { + "target": "sklearn/sklearn.linear_model._ransac/RANSACRegressor/__init__/stop_probability", + "defaultType": "number", + "defaultValue": 0.99 + }, + "sklearn/sklearn.linear_model._ransac/RANSACRegressor/fit/sample_weight": { + "target": "sklearn/sklearn.linear_model._ransac/RANSACRegressor/fit/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.linear_model._ridge/Ridge/__init__/positive": { + "target": "sklearn/sklearn.linear_model._ridge/Ridge/__init__/positive", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.linear_model._ridge/RidgeClassifier/__init__/fit_intercept": { + "target": "sklearn/sklearn.linear_model._ridge/RidgeClassifier/__init__/fit_intercept", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.linear_model._ridge/RidgeClassifier/__init__/copy_X": { + "target": "sklearn/sklearn.linear_model._ridge/RidgeClassifier/__init__/copy_X", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.linear_model._ridge/RidgeClassifier/__init__/max_iter": { + "target": "sklearn/sklearn.linear_model._ridge/RidgeClassifier/__init__/max_iter", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.linear_model._ridge/RidgeClassifier/__init__/positive": { + "target": "sklearn/sklearn.linear_model._ridge/RidgeClassifier/__init__/positive", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.linear_model._ridge/RidgeClassifier/fit/sample_weight": { + "target": "sklearn/sklearn.linear_model._ridge/RidgeClassifier/fit/sample_weight", + "defaultType": "none", + "defaultValue": null + 
}, + "sklearn/sklearn.linear_model._ridge/RidgeClassifierCV/__init__/fit_intercept": { + "target": "sklearn/sklearn.linear_model._ridge/RidgeClassifierCV/__init__/fit_intercept", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.linear_model._ridge/RidgeClassifierCV/__init__/normalize": { + "target": "sklearn/sklearn.linear_model._ridge/RidgeClassifierCV/__init__/normalize", + "defaultType": "string", + "defaultValue": "deprecated" + }, + "sklearn/sklearn.linear_model._ridge/RidgeClassifierCV/__init__/scoring": { + "target": "sklearn/sklearn.linear_model._ridge/RidgeClassifierCV/__init__/scoring", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.linear_model._ridge/RidgeClassifierCV/__init__/class_weight": { + "target": "sklearn/sklearn.linear_model._ridge/RidgeClassifierCV/__init__/class_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.linear_model._ridge/RidgeClassifierCV/__init__/store_cv_values": { + "target": "sklearn/sklearn.linear_model._ridge/RidgeClassifierCV/__init__/store_cv_values", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.linear_model._ridge/RidgeClassifierCV/fit/sample_weight": { + "target": "sklearn/sklearn.linear_model._ridge/RidgeClassifierCV/fit/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.linear_model._ridge/_BaseRidgeCV/__init__/alpha_per_target": { + "target": "sklearn/sklearn.linear_model._ridge/_BaseRidgeCV/__init__/alpha_per_target", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.linear_model._ridge/_BaseRidgeCV/fit/sample_weight": { + "target": "sklearn/sklearn.linear_model._ridge/_BaseRidgeCV/fit/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.linear_model._stochastic_gradient/BaseSGDClassifier/fit/coef_init": { + "target": "sklearn/sklearn.linear_model._stochastic_gradient/BaseSGDClassifier/fit/coef_init", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.linear_model._stochastic_gradient/BaseSGDClassifier/fit/intercept_init": { + "target": "sklearn/sklearn.linear_model._stochastic_gradient/BaseSGDClassifier/fit/intercept_init", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.linear_model._stochastic_gradient/BaseSGDClassifier/fit/sample_weight": { + "target": "sklearn/sklearn.linear_model._stochastic_gradient/BaseSGDClassifier/fit/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.linear_model._stochastic_gradient/BaseSGDClassifier/partial_fit/sample_weight": { + "target": "sklearn/sklearn.linear_model._stochastic_gradient/BaseSGDClassifier/partial_fit/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.linear_model._stochastic_gradient/BaseSGDRegressor/fit/coef_init": { + "target": "sklearn/sklearn.linear_model._stochastic_gradient/BaseSGDRegressor/fit/coef_init", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.linear_model._stochastic_gradient/BaseSGDRegressor/fit/intercept_init": { + "target": "sklearn/sklearn.linear_model._stochastic_gradient/BaseSGDRegressor/fit/intercept_init", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.linear_model._stochastic_gradient/BaseSGDRegressor/fit/sample_weight": { + "target": "sklearn/sklearn.linear_model._stochastic_gradient/BaseSGDRegressor/fit/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + 
"sklearn/sklearn.linear_model._stochastic_gradient/SGDClassifier/__init__/fit_intercept": { + "target": "sklearn/sklearn.linear_model._stochastic_gradient/SGDClassifier/__init__/fit_intercept", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.linear_model._stochastic_gradient/SGDClassifier/__init__/shuffle": { + "target": "sklearn/sklearn.linear_model._stochastic_gradient/SGDClassifier/__init__/shuffle", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.linear_model._stochastic_gradient/SGDClassifier/__init__/power_t": { + "target": "sklearn/sklearn.linear_model._stochastic_gradient/SGDClassifier/__init__/power_t", + "defaultType": "number", + "defaultValue": 0.5 + }, + "sklearn/sklearn.linear_model._stochastic_gradient/SGDRegressor/__init__/shuffle": { + "target": "sklearn/sklearn.linear_model._stochastic_gradient/SGDRegressor/__init__/shuffle", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.linear_model._stochastic_gradient/SGDRegressor/__init__/validation_fraction": { + "target": "sklearn/sklearn.linear_model._stochastic_gradient/SGDRegressor/__init__/validation_fraction", + "defaultType": "number", + "defaultValue": 0.1 + }, + "sklearn/sklearn.linear_model._stochastic_gradient/SGDRegressor/__init__/average": { + "target": "sklearn/sklearn.linear_model._stochastic_gradient/SGDRegressor/__init__/average", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.linear_model._theil_sen/TheilSenRegressor/__init__/copy_X": { + "target": "sklearn/sklearn.linear_model._theil_sen/TheilSenRegressor/__init__/copy_X", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.linear_model._theil_sen/TheilSenRegressor/__init__/max_subpopulation": { + "target": "sklearn/sklearn.linear_model._theil_sen/TheilSenRegressor/__init__/max_subpopulation", + "defaultType": "number", + "defaultValue": 10000.0 + }, + "sklearn/sklearn.linear_model._theil_sen/TheilSenRegressor/__init__/n_subsamples": { + "target": "sklearn/sklearn.linear_model._theil_sen/TheilSenRegressor/__init__/n_subsamples", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.linear_model._theil_sen/TheilSenRegressor/__init__/max_iter": { + "target": "sklearn/sklearn.linear_model._theil_sen/TheilSenRegressor/__init__/max_iter", + "defaultType": "number", + "defaultValue": 300.0 + }, + "sklearn/sklearn.linear_model._theil_sen/TheilSenRegressor/__init__/tol": { + "target": "sklearn/sklearn.linear_model._theil_sen/TheilSenRegressor/__init__/tol", + "defaultType": "number", + "defaultValue": 0.001 + }, + "sklearn/sklearn.linear_model._theil_sen/TheilSenRegressor/__init__/n_jobs": { + "target": "sklearn/sklearn.linear_model._theil_sen/TheilSenRegressor/__init__/n_jobs", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.linear_model._theil_sen/TheilSenRegressor/__init__/verbose": { + "target": "sklearn/sklearn.linear_model._theil_sen/TheilSenRegressor/__init__/verbose", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.manifold._isomap/Isomap/__init__/radius": { + "target": "sklearn/sklearn.manifold._isomap/Isomap/__init__/radius", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.manifold._isomap/Isomap/__init__/tol": { + "target": "sklearn/sklearn.manifold._isomap/Isomap/__init__/tol", + "defaultType": "number", + "defaultValue": 0.0 + }, + "sklearn/sklearn.manifold._isomap/Isomap/__init__/max_iter": { + "target": 
"sklearn/sklearn.manifold._isomap/Isomap/__init__/max_iter", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.manifold._isomap/Isomap/__init__/path_method": { + "target": "sklearn/sklearn.manifold._isomap/Isomap/__init__/path_method", + "defaultType": "string", + "defaultValue": "auto" + }, + "sklearn/sklearn.manifold._isomap/Isomap/__init__/neighbors_algorithm": { + "target": "sklearn/sklearn.manifold._isomap/Isomap/__init__/neighbors_algorithm", + "defaultType": "string", + "defaultValue": "auto" + }, + "sklearn/sklearn.manifold._isomap/Isomap/__init__/metric": { + "target": "sklearn/sklearn.manifold._isomap/Isomap/__init__/metric", + "defaultType": "string", + "defaultValue": "minkowski" + }, + "sklearn/sklearn.manifold._isomap/Isomap/__init__/p": { + "target": "sklearn/sklearn.manifold._isomap/Isomap/__init__/p", + "defaultType": "number", + "defaultValue": 2.0 + }, + "sklearn/sklearn.manifold._isomap/Isomap/__init__/metric_params": { + "target": "sklearn/sklearn.manifold._isomap/Isomap/__init__/metric_params", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.manifold._isomap/Isomap/fit/y": { + "target": "sklearn/sklearn.manifold._isomap/Isomap/fit/y", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.manifold._isomap/Isomap/fit_transform/y": { + "target": "sklearn/sklearn.manifold._isomap/Isomap/fit_transform/y", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.manifold._locally_linear/LocallyLinearEmbedding/__init__/reg": { + "target": "sklearn/sklearn.manifold._locally_linear/LocallyLinearEmbedding/__init__/reg", + "defaultType": "number", + "defaultValue": 0.001 + }, + "sklearn/sklearn.manifold._locally_linear/LocallyLinearEmbedding/__init__/tol": { + "target": "sklearn/sklearn.manifold._locally_linear/LocallyLinearEmbedding/__init__/tol", + "defaultType": "number", + "defaultValue": 1e-6 + }, + "sklearn/sklearn.manifold._locally_linear/LocallyLinearEmbedding/__init__/max_iter": { + "target": "sklearn/sklearn.manifold._locally_linear/LocallyLinearEmbedding/__init__/max_iter", + "defaultType": "number", + "defaultValue": 100.0 + }, + "sklearn/sklearn.manifold._locally_linear/LocallyLinearEmbedding/__init__/hessian_tol": { + "target": "sklearn/sklearn.manifold._locally_linear/LocallyLinearEmbedding/__init__/hessian_tol", + "defaultType": "number", + "defaultValue": 0.0001 + }, + "sklearn/sklearn.manifold._locally_linear/LocallyLinearEmbedding/__init__/modified_tol": { + "target": "sklearn/sklearn.manifold._locally_linear/LocallyLinearEmbedding/__init__/modified_tol", + "defaultType": "number", + "defaultValue": 1e-12 + }, + "sklearn/sklearn.manifold._locally_linear/LocallyLinearEmbedding/__init__/neighbors_algorithm": { + "target": "sklearn/sklearn.manifold._locally_linear/LocallyLinearEmbedding/__init__/neighbors_algorithm", + "defaultType": "string", + "defaultValue": "auto" + }, + "sklearn/sklearn.manifold._locally_linear/LocallyLinearEmbedding/__init__/n_jobs": { + "target": "sklearn/sklearn.manifold._locally_linear/LocallyLinearEmbedding/__init__/n_jobs", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.manifold._locally_linear/LocallyLinearEmbedding/fit_transform/y": { + "target": "sklearn/sklearn.manifold._locally_linear/LocallyLinearEmbedding/fit_transform/y", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.manifold._mds/MDS/__init__/metric": { + "target": "sklearn/sklearn.manifold._mds/MDS/__init__/metric", + "defaultType": "boolean", + 
"defaultValue": true + }, + "sklearn/sklearn.manifold._mds/MDS/__init__/verbose": { + "target": "sklearn/sklearn.manifold._mds/MDS/__init__/verbose", + "defaultType": "number", + "defaultValue": 0.0 + }, + "sklearn/sklearn.manifold._mds/MDS/__init__/eps": { + "target": "sklearn/sklearn.manifold._mds/MDS/__init__/eps", + "defaultType": "number", + "defaultValue": 0.001 + }, + "sklearn/sklearn.manifold._mds/MDS/fit_transform/y": { + "target": "sklearn/sklearn.manifold._mds/MDS/fit_transform/y", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.manifold._mds/MDS/fit_transform/init": { + "target": "sklearn/sklearn.manifold._mds/MDS/fit_transform/init", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.manifold._mds/smacof/metric": { + "target": "sklearn/sklearn.manifold._mds/smacof/metric", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.manifold._mds/smacof/n_components": { + "target": "sklearn/sklearn.manifold._mds/smacof/n_components", + "defaultType": "number", + "defaultValue": 2.0 + }, + "sklearn/sklearn.manifold._mds/smacof/init": { + "target": "sklearn/sklearn.manifold._mds/smacof/init", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.manifold._mds/smacof/n_init": { + "target": "sklearn/sklearn.manifold._mds/smacof/n_init", + "defaultType": "number", + "defaultValue": 8.0 + }, + "sklearn/sklearn.manifold._mds/smacof/n_jobs": { + "target": "sklearn/sklearn.manifold._mds/smacof/n_jobs", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.manifold._mds/smacof/max_iter": { + "target": "sklearn/sklearn.manifold._mds/smacof/max_iter", + "defaultType": "number", + "defaultValue": 300.0 + }, + "sklearn/sklearn.manifold._mds/smacof/verbose": { + "target": "sklearn/sklearn.manifold._mds/smacof/verbose", + "defaultType": "number", + "defaultValue": 0.0 + }, + "sklearn/sklearn.manifold._mds/smacof/eps": { + "target": "sklearn/sklearn.manifold._mds/smacof/eps", + "defaultType": "number", + "defaultValue": 0.001 + }, + "sklearn/sklearn.manifold._mds/smacof/random_state": { + "target": "sklearn/sklearn.manifold._mds/smacof/random_state", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.manifold._mds/smacof/return_n_iter": { + "target": "sklearn/sklearn.manifold._mds/smacof/return_n_iter", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.manifold._spectral_embedding/SpectralEmbedding/__init__/gamma": { + "target": "sklearn/sklearn.manifold._spectral_embedding/SpectralEmbedding/__init__/gamma", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.manifold._spectral_embedding/SpectralEmbedding/__init__/random_state": { + "target": "sklearn/sklearn.manifold._spectral_embedding/SpectralEmbedding/__init__/random_state", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.manifold._spectral_embedding/SpectralEmbedding/__init__/eigen_solver": { + "target": "sklearn/sklearn.manifold._spectral_embedding/SpectralEmbedding/__init__/eigen_solver", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.manifold._spectral_embedding/SpectralEmbedding/fit_transform/y": { + "target": "sklearn/sklearn.manifold._spectral_embedding/SpectralEmbedding/fit_transform/y", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.manifold._t_sne/TSNE/__init__/metric_params": { + "target": "sklearn/sklearn.manifold._t_sne/TSNE/__init__/metric_params", + "defaultType": "none", + "defaultValue": 
null + }, + "sklearn/sklearn.manifold._t_sne/TSNE/__init__/square_distances": { + "target": "sklearn/sklearn.manifold._t_sne/TSNE/__init__/square_distances", + "defaultType": "string", + "defaultValue": "deprecated" + }, + "sklearn/sklearn.metrics._classification/balanced_accuracy_score/adjusted": { + "target": "sklearn/sklearn.metrics._classification/balanced_accuracy_score/adjusted", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.metrics._classification/brier_score_loss/sample_weight": { + "target": "sklearn/sklearn.metrics._classification/brier_score_loss/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics._classification/classification_report/sample_weight": { + "target": "sklearn/sklearn.metrics._classification/classification_report/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics._classification/f1_score/sample_weight": { + "target": "sklearn/sklearn.metrics._classification/f1_score/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics._classification/fbeta_score/labels": { + "target": "sklearn/sklearn.metrics._classification/fbeta_score/labels", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics._classification/fbeta_score/pos_label": { + "target": "sklearn/sklearn.metrics._classification/fbeta_score/pos_label", + "defaultType": "number", + "defaultValue": 1.0 + }, + "sklearn/sklearn.metrics._classification/fbeta_score/sample_weight": { + "target": "sklearn/sklearn.metrics._classification/fbeta_score/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics._classification/fbeta_score/zero_division": { + "target": "sklearn/sklearn.metrics._classification/fbeta_score/zero_division", + "defaultType": "string", + "defaultValue": "warn" + }, + "sklearn/sklearn.metrics._classification/hamming_loss/sample_weight": { + "target": "sklearn/sklearn.metrics._classification/hamming_loss/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics._classification/hinge_loss/labels": { + "target": "sklearn/sklearn.metrics._classification/hinge_loss/labels", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics._classification/hinge_loss/sample_weight": { + "target": "sklearn/sklearn.metrics._classification/hinge_loss/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics._classification/jaccard_score/labels": { + "target": "sklearn/sklearn.metrics._classification/jaccard_score/labels", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics._classification/jaccard_score/sample_weight": { + "target": "sklearn/sklearn.metrics._classification/jaccard_score/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics._classification/jaccard_score/zero_division": { + "target": "sklearn/sklearn.metrics._classification/jaccard_score/zero_division", + "defaultType": "string", + "defaultValue": "warn" + }, + "sklearn/sklearn.metrics._classification/matthews_corrcoef/sample_weight": { + "target": "sklearn/sklearn.metrics._classification/matthews_corrcoef/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics._classification/multilabel_confusion_matrix/sample_weight": { + "target": "sklearn/sklearn.metrics._classification/multilabel_confusion_matrix/sample_weight", + "defaultType": 
"none", + "defaultValue": null + }, + "sklearn/sklearn.metrics._classification/multilabel_confusion_matrix/samplewise": { + "target": "sklearn/sklearn.metrics._classification/multilabel_confusion_matrix/samplewise", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.metrics._classification/precision_recall_fscore_support/pos_label": { + "target": "sklearn/sklearn.metrics._classification/precision_recall_fscore_support/pos_label", + "defaultType": "number", + "defaultValue": 1.0 + }, + "sklearn/sklearn.metrics._classification/precision_recall_fscore_support/sample_weight": { + "target": "sklearn/sklearn.metrics._classification/precision_recall_fscore_support/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics._classification/precision_recall_fscore_support/zero_division": { + "target": "sklearn/sklearn.metrics._classification/precision_recall_fscore_support/zero_division", + "defaultType": "string", + "defaultValue": "warn" + }, + "sklearn/sklearn.metrics._classification/precision_score/sample_weight": { + "target": "sklearn/sklearn.metrics._classification/precision_score/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics._classification/recall_score/sample_weight": { + "target": "sklearn/sklearn.metrics._classification/recall_score/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics._classification/zero_one_loss/normalize": { + "target": "sklearn/sklearn.metrics._classification/zero_one_loss/normalize", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.metrics._classification/zero_one_loss/sample_weight": { + "target": "sklearn/sklearn.metrics._classification/zero_one_loss/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics._plot.confusion_matrix/ConfusionMatrixDisplay/plot/im_kw": { + "target": "sklearn/sklearn.metrics._plot.confusion_matrix/ConfusionMatrixDisplay/plot/im_kw", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics._plot.confusion_matrix/plot_confusion_matrix/sample_weight": { + "target": "sklearn/sklearn.metrics._plot.confusion_matrix/plot_confusion_matrix/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics._plot.confusion_matrix/plot_confusion_matrix/include_values": { + "target": "sklearn/sklearn.metrics._plot.confusion_matrix/plot_confusion_matrix/include_values", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.metrics._plot.confusion_matrix/plot_confusion_matrix/colorbar": { + "target": "sklearn/sklearn.metrics._plot.confusion_matrix/plot_confusion_matrix/colorbar", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.metrics._plot.precision_recall_curve/PrecisionRecallDisplay/__init__/average_precision": { + "target": "sklearn/sklearn.metrics._plot.precision_recall_curve/PrecisionRecallDisplay/__init__/average_precision", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics._plot.precision_recall_curve/PrecisionRecallDisplay/__init__/estimator_name": { + "target": "sklearn/sklearn.metrics._plot.precision_recall_curve/PrecisionRecallDisplay/__init__/estimator_name", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics._plot.precision_recall_curve/PrecisionRecallDisplay/__init__/pos_label": { + "target": 
"sklearn/sklearn.metrics._plot.precision_recall_curve/PrecisionRecallDisplay/__init__/pos_label", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics._plot.precision_recall_curve/PrecisionRecallDisplay/plot/ax": { + "target": "sklearn/sklearn.metrics._plot.precision_recall_curve/PrecisionRecallDisplay/plot/ax", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics._plot.precision_recall_curve/PrecisionRecallDisplay/plot/name": { + "target": "sklearn/sklearn.metrics._plot.precision_recall_curve/PrecisionRecallDisplay/plot/name", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics._plot.precision_recall_curve/plot_precision_recall_curve/sample_weight": { + "target": "sklearn/sklearn.metrics._plot.precision_recall_curve/plot_precision_recall_curve/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics._plot.precision_recall_curve/plot_precision_recall_curve/response_method": { + "target": "sklearn/sklearn.metrics._plot.precision_recall_curve/plot_precision_recall_curve/response_method", + "defaultType": "string", + "defaultValue": "auto" + }, + "sklearn/sklearn.metrics._plot.precision_recall_curve/plot_precision_recall_curve/name": { + "target": "sklearn/sklearn.metrics._plot.precision_recall_curve/plot_precision_recall_curve/name", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics._plot.precision_recall_curve/plot_precision_recall_curve/pos_label": { + "target": "sklearn/sklearn.metrics._plot.precision_recall_curve/plot_precision_recall_curve/pos_label", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics._plot.roc_curve/RocCurveDisplay/__init__/roc_auc": { + "target": "sklearn/sklearn.metrics._plot.roc_curve/RocCurveDisplay/__init__/roc_auc", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics._plot.roc_curve/RocCurveDisplay/__init__/estimator_name": { + "target": "sklearn/sklearn.metrics._plot.roc_curve/RocCurveDisplay/__init__/estimator_name", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics._plot.roc_curve/RocCurveDisplay/__init__/pos_label": { + "target": "sklearn/sklearn.metrics._plot.roc_curve/RocCurveDisplay/__init__/pos_label", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics._plot.roc_curve/RocCurveDisplay/plot/ax": { + "target": "sklearn/sklearn.metrics._plot.roc_curve/RocCurveDisplay/plot/ax", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics._plot.roc_curve/RocCurveDisplay/plot/name": { + "target": "sklearn/sklearn.metrics._plot.roc_curve/RocCurveDisplay/plot/name", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics._plot.roc_curve/plot_roc_curve/sample_weight": { + "target": "sklearn/sklearn.metrics._plot.roc_curve/plot_roc_curve/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics._plot.roc_curve/plot_roc_curve/response_method": { + "target": "sklearn/sklearn.metrics._plot.roc_curve/plot_roc_curve/response_method", + "defaultType": "string", + "defaultValue": "auto" + }, + "sklearn/sklearn.metrics._plot.roc_curve/plot_roc_curve/pos_label": { + "target": "sklearn/sklearn.metrics._plot.roc_curve/plot_roc_curve/pos_label", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics._ranking/average_precision_score/pos_label": { + "target": 
"sklearn/sklearn.metrics._ranking/average_precision_score/pos_label", + "defaultType": "number", + "defaultValue": 1.0 + }, + "sklearn/sklearn.metrics._ranking/average_precision_score/sample_weight": { + "target": "sklearn/sklearn.metrics._ranking/average_precision_score/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics._ranking/ndcg_score/k": { + "target": "sklearn/sklearn.metrics._ranking/ndcg_score/k", + "defaultType": "number", + "defaultValue": 5.0 + }, + "sklearn/sklearn.metrics._ranking/ndcg_score/sample_weight": { + "target": "sklearn/sklearn.metrics._ranking/ndcg_score/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics._ranking/ndcg_score/ignore_ties": { + "target": "sklearn/sklearn.metrics._ranking/ndcg_score/ignore_ties", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.metrics._ranking/precision_recall_curve/sample_weight": { + "target": "sklearn/sklearn.metrics._ranking/precision_recall_curve/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics._ranking/roc_auc_score/max_fpr": { + "target": "sklearn/sklearn.metrics._ranking/roc_auc_score/max_fpr", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics._regression/explained_variance_score/sample_weight": { + "target": "sklearn/sklearn.metrics._regression/explained_variance_score/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics._regression/explained_variance_score/multioutput": { + "target": "sklearn/sklearn.metrics._regression/explained_variance_score/multioutput", + "defaultType": "string", + "defaultValue": "uniform_average" + }, + "sklearn/sklearn.metrics._regression/explained_variance_score/force_finite": { + "target": "sklearn/sklearn.metrics._regression/explained_variance_score/force_finite", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.metrics._regression/mean_absolute_percentage_error/sample_weight": { + "target": "sklearn/sklearn.metrics._regression/mean_absolute_percentage_error/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics._regression/mean_absolute_percentage_error/multioutput": { + "target": "sklearn/sklearn.metrics._regression/mean_absolute_percentage_error/multioutput", + "defaultType": "string", + "defaultValue": "uniform_average" + }, + "sklearn/sklearn.metrics._regression/mean_squared_log_error/sample_weight": { + "target": "sklearn/sklearn.metrics._regression/mean_squared_log_error/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics._regression/mean_squared_log_error/multioutput": { + "target": "sklearn/sklearn.metrics._regression/mean_squared_log_error/multioutput", + "defaultType": "string", + "defaultValue": "uniform_average" + }, + "sklearn/sklearn.metrics._regression/mean_squared_log_error/squared": { + "target": "sklearn/sklearn.metrics._regression/mean_squared_log_error/squared", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.metrics._regression/mean_tweedie_deviance/sample_weight": { + "target": "sklearn/sklearn.metrics._regression/mean_tweedie_deviance/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics._regression/median_absolute_error/multioutput": { + "target": "sklearn/sklearn.metrics._regression/median_absolute_error/multioutput", + "defaultType": "string", + "defaultValue": 
"uniform_average" + }, + "sklearn/sklearn.metrics._regression/median_absolute_error/sample_weight": { + "target": "sklearn/sklearn.metrics._regression/median_absolute_error/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics._regression/r2_score/force_finite": { + "target": "sklearn/sklearn.metrics._regression/r2_score/force_finite", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.metrics.cluster._supervised/adjusted_mutual_info_score/average_method": { + "target": "sklearn/sklearn.metrics.cluster._supervised/adjusted_mutual_info_score/average_method", + "defaultType": "string", + "defaultValue": "arithmetic" + }, + "sklearn/sklearn.metrics.cluster._supervised/normalized_mutual_info_score/average_method": { + "target": "sklearn/sklearn.metrics.cluster._supervised/normalized_mutual_info_score/average_method", + "defaultType": "string", + "defaultValue": "arithmetic" + }, + "sklearn/sklearn.metrics.cluster._supervised/v_measure_score/beta": { + "target": "sklearn/sklearn.metrics.cluster._supervised/v_measure_score/beta", + "defaultType": "number", + "defaultValue": 1.0 + }, + "sklearn/sklearn.metrics.cluster._unsupervised/silhouette_samples/metric": { + "target": "sklearn/sklearn.metrics.cluster._unsupervised/silhouette_samples/metric", + "defaultType": "string", + "defaultValue": "euclidean" + }, + "sklearn/sklearn.metrics.pairwise/cosine_distances/Y": { + "target": "sklearn/sklearn.metrics.pairwise/cosine_distances/Y", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics.pairwise/cosine_similarity/dense_output": { + "target": "sklearn/sklearn.metrics.pairwise/cosine_similarity/dense_output", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.metrics.pairwise/euclidean_distances/Y_norm_squared": { + "target": "sklearn/sklearn.metrics.pairwise/euclidean_distances/Y_norm_squared", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics.pairwise/euclidean_distances/squared": { + "target": "sklearn/sklearn.metrics.pairwise/euclidean_distances/squared", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.metrics.pairwise/euclidean_distances/X_norm_squared": { + "target": "sklearn/sklearn.metrics.pairwise/euclidean_distances/X_norm_squared", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics.pairwise/linear_kernel/dense_output": { + "target": "sklearn/sklearn.metrics.pairwise/linear_kernel/dense_output", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.metrics.pairwise/manhattan_distances/sum_over_features": { + "target": "sklearn/sklearn.metrics.pairwise/manhattan_distances/sum_over_features", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.metrics.pairwise/paired_distances/metric": { + "target": "sklearn/sklearn.metrics.pairwise/paired_distances/metric", + "defaultType": "string", + "defaultValue": "euclidean" + }, + "sklearn/sklearn.metrics.pairwise/pairwise_distances/force_all_finite": { + "target": "sklearn/sklearn.metrics.pairwise/pairwise_distances/force_all_finite", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.metrics.pairwise/pairwise_distances_argmin/axis": { + "target": "sklearn/sklearn.metrics.pairwise/pairwise_distances_argmin/axis", + "defaultType": "number", + "defaultValue": 1.0 + }, + "sklearn/sklearn.metrics.pairwise/pairwise_distances_argmin/metric": { + "target": 
"sklearn/sklearn.metrics.pairwise/pairwise_distances_argmin/metric", + "defaultType": "string", + "defaultValue": "euclidean" + }, + "sklearn/sklearn.metrics.pairwise/pairwise_distances_argmin/metric_kwargs": { + "target": "sklearn/sklearn.metrics.pairwise/pairwise_distances_argmin/metric_kwargs", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics.pairwise/pairwise_distances_argmin_min/axis": { + "target": "sklearn/sklearn.metrics.pairwise/pairwise_distances_argmin_min/axis", + "defaultType": "number", + "defaultValue": 1.0 + }, + "sklearn/sklearn.metrics.pairwise/pairwise_distances_argmin_min/metric": { + "target": "sklearn/sklearn.metrics.pairwise/pairwise_distances_argmin_min/metric", + "defaultType": "string", + "defaultValue": "euclidean" + }, + "sklearn/sklearn.metrics.pairwise/pairwise_distances_argmin_min/metric_kwargs": { + "target": "sklearn/sklearn.metrics.pairwise/pairwise_distances_argmin_min/metric_kwargs", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics.pairwise/pairwise_distances_chunked/Y": { + "target": "sklearn/sklearn.metrics.pairwise/pairwise_distances_chunked/Y", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics.pairwise/pairwise_distances_chunked/metric": { + "target": "sklearn/sklearn.metrics.pairwise/pairwise_distances_chunked/metric", + "defaultType": "string", + "defaultValue": "cosine" + }, + "sklearn/sklearn.metrics.pairwise/pairwise_distances_chunked/n_jobs": { + "target": "sklearn/sklearn.metrics.pairwise/pairwise_distances_chunked/n_jobs", + "defaultType": "number", + "defaultValue": -1.0 + }, + "sklearn/sklearn.metrics.pairwise/pairwise_distances_chunked/working_memory": { + "target": "sklearn/sklearn.metrics.pairwise/pairwise_distances_chunked/working_memory", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics.pairwise/pairwise_kernels/Y": { + "target": "sklearn/sklearn.metrics.pairwise/pairwise_kernels/Y", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics.pairwise/pairwise_kernels/metric": { + "target": "sklearn/sklearn.metrics.pairwise/pairwise_kernels/metric", + "defaultType": "string", + "defaultValue": "rbf" + }, + "sklearn/sklearn.metrics.pairwise/pairwise_kernels/filter_params": { + "target": "sklearn/sklearn.metrics.pairwise/pairwise_kernels/filter_params", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.metrics.pairwise/pairwise_kernels/n_jobs": { + "target": "sklearn/sklearn.metrics.pairwise/pairwise_kernels/n_jobs", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics.pairwise/polynomial_kernel/degree": { + "target": "sklearn/sklearn.metrics.pairwise/polynomial_kernel/degree", + "defaultType": "number", + "defaultValue": 3.0 + }, + "sklearn/sklearn.metrics.pairwise/polynomial_kernel/gamma": { + "target": "sklearn/sklearn.metrics.pairwise/polynomial_kernel/gamma", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics.pairwise/polynomial_kernel/coef0": { + "target": "sklearn/sklearn.metrics.pairwise/polynomial_kernel/coef0", + "defaultType": "number", + "defaultValue": 1.0 + }, + "sklearn/sklearn.metrics.pairwise/rbf_kernel/gamma": { + "target": "sklearn/sklearn.metrics.pairwise/rbf_kernel/gamma", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.metrics.pairwise/sigmoid_kernel/gamma": { + "target": "sklearn/sklearn.metrics.pairwise/sigmoid_kernel/gamma", + "defaultType": "none", + "defaultValue": 
null + }, + "sklearn/sklearn.metrics.pairwise/sigmoid_kernel/coef0": { + "target": "sklearn/sklearn.metrics.pairwise/sigmoid_kernel/coef0", + "defaultType": "number", + "defaultValue": 1.0 + }, + "sklearn/sklearn.mixture._base/BaseMixture/score/y": { + "target": "sklearn/sklearn.mixture._base/BaseMixture/score/y", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.mixture._bayesian_mixture/BayesianGaussianMixture/__init__/covariance_type": { + "target": "sklearn/sklearn.mixture._bayesian_mixture/BayesianGaussianMixture/__init__/covariance_type", + "defaultType": "string", + "defaultValue": "full" + }, + "sklearn/sklearn.mixture._bayesian_mixture/BayesianGaussianMixture/__init__/tol": { + "target": "sklearn/sklearn.mixture._bayesian_mixture/BayesianGaussianMixture/__init__/tol", + "defaultType": "number", + "defaultValue": 0.001 + }, + "sklearn/sklearn.mixture._bayesian_mixture/BayesianGaussianMixture/__init__/reg_covar": { + "target": "sklearn/sklearn.mixture._bayesian_mixture/BayesianGaussianMixture/__init__/reg_covar", + "defaultType": "number", + "defaultValue": 1e-6 + }, + "sklearn/sklearn.mixture._bayesian_mixture/BayesianGaussianMixture/__init__/n_init": { + "target": "sklearn/sklearn.mixture._bayesian_mixture/BayesianGaussianMixture/__init__/n_init", + "defaultType": "number", + "defaultValue": 1.0 + }, + "sklearn/sklearn.mixture._bayesian_mixture/BayesianGaussianMixture/__init__/init_params": { + "target": "sklearn/sklearn.mixture._bayesian_mixture/BayesianGaussianMixture/__init__/init_params", + "defaultType": "string", + "defaultValue": "kmeans" + }, + "sklearn/sklearn.mixture._bayesian_mixture/BayesianGaussianMixture/__init__/weight_concentration_prior_type": { + "target": "sklearn/sklearn.mixture._bayesian_mixture/BayesianGaussianMixture/__init__/weight_concentration_prior_type", + "defaultType": "string", + "defaultValue": "dirichlet_process" + }, + "sklearn/sklearn.mixture._bayesian_mixture/BayesianGaussianMixture/__init__/mean_precision_prior": { + "target": "sklearn/sklearn.mixture._bayesian_mixture/BayesianGaussianMixture/__init__/mean_precision_prior", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.mixture._bayesian_mixture/BayesianGaussianMixture/__init__/mean_prior": { + "target": "sklearn/sklearn.mixture._bayesian_mixture/BayesianGaussianMixture/__init__/mean_prior", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.mixture._bayesian_mixture/BayesianGaussianMixture/__init__/degrees_of_freedom_prior": { + "target": "sklearn/sklearn.mixture._bayesian_mixture/BayesianGaussianMixture/__init__/degrees_of_freedom_prior", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.mixture._bayesian_mixture/BayesianGaussianMixture/__init__/covariance_prior": { + "target": "sklearn/sklearn.mixture._bayesian_mixture/BayesianGaussianMixture/__init__/covariance_prior", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.mixture._bayesian_mixture/BayesianGaussianMixture/__init__/warm_start": { + "target": "sklearn/sklearn.mixture._bayesian_mixture/BayesianGaussianMixture/__init__/warm_start", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.mixture._bayesian_mixture/BayesianGaussianMixture/__init__/verbose": { + "target": "sklearn/sklearn.mixture._bayesian_mixture/BayesianGaussianMixture/__init__/verbose", + "defaultType": "number", + "defaultValue": 0.0 + }, + "sklearn/sklearn.mixture._bayesian_mixture/BayesianGaussianMixture/__init__/verbose_interval": { 
+ "target": "sklearn/sklearn.mixture._bayesian_mixture/BayesianGaussianMixture/__init__/verbose_interval", + "defaultType": "number", + "defaultValue": 10.0 + }, + "sklearn/sklearn.mixture._gaussian_mixture/GaussianMixture/__init__/warm_start": { + "target": "sklearn/sklearn.mixture._gaussian_mixture/GaussianMixture/__init__/warm_start", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.model_selection._search_successive_halving/BaseSuccessiveHalving/fit/groups": { + "target": "sklearn/sklearn.model_selection._search_successive_halving/BaseSuccessiveHalving/fit/groups", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.model_selection._search_successive_halving/HalvingRandomSearchCV/__init__/n_candidates": { + "target": "sklearn/sklearn.model_selection._search_successive_halving/HalvingRandomSearchCV/__init__/n_candidates", + "defaultType": "string", + "defaultValue": "exhaust" + }, + "sklearn/sklearn.model_selection._search_successive_halving/HalvingRandomSearchCV/__init__/factor": { + "target": "sklearn/sklearn.model_selection._search_successive_halving/HalvingRandomSearchCV/__init__/factor", + "defaultType": "number", + "defaultValue": 3.0 + }, + "sklearn/sklearn.model_selection._search_successive_halving/HalvingRandomSearchCV/__init__/min_resources": { + "target": "sklearn/sklearn.model_selection._search_successive_halving/HalvingRandomSearchCV/__init__/min_resources", + "defaultType": "string", + "defaultValue": "smallest" + }, + "sklearn/sklearn.model_selection._search_successive_halving/HalvingRandomSearchCV/__init__/aggressive_elimination": { + "target": "sklearn/sklearn.model_selection._search_successive_halving/HalvingRandomSearchCV/__init__/aggressive_elimination", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.model_selection._search_successive_halving/HalvingRandomSearchCV/__init__/refit": { + "target": "sklearn/sklearn.model_selection._search_successive_halving/HalvingRandomSearchCV/__init__/refit", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.model_selection._search_successive_halving/HalvingRandomSearchCV/__init__/return_train_score": { + "target": "sklearn/sklearn.model_selection._search_successive_halving/HalvingRandomSearchCV/__init__/return_train_score", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.model_selection._search_successive_halving/HalvingRandomSearchCV/__init__/n_jobs": { + "target": "sklearn/sklearn.model_selection._search_successive_halving/HalvingRandomSearchCV/__init__/n_jobs", + "defaultType": "number", + "defaultValue": -1.0 + }, + "sklearn/sklearn.model_selection._split/BaseCrossValidator/split/y": { + "target": "sklearn/sklearn.model_selection._split/BaseCrossValidator/split/y", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.model_selection._split/BaseCrossValidator/split/groups": { + "target": "sklearn/sklearn.model_selection._split/BaseCrossValidator/split/groups", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.model_selection._split/LeaveOneOut/get_n_splits/y": { + "target": "sklearn/sklearn.model_selection._split/LeaveOneOut/get_n_splits/y", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.model_selection._split/LeaveOneOut/get_n_splits/groups": { + "target": "sklearn/sklearn.model_selection._split/LeaveOneOut/get_n_splits/groups", + "defaultType": "none", + "defaultValue": null + }, + 
"sklearn/sklearn.model_selection._split/PredefinedSplit/get_n_splits/X": { + "target": "sklearn/sklearn.model_selection._split/PredefinedSplit/get_n_splits/X", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.model_selection._split/PredefinedSplit/get_n_splits/y": { + "target": "sklearn/sklearn.model_selection._split/PredefinedSplit/get_n_splits/y", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.model_selection._split/PredefinedSplit/get_n_splits/groups": { + "target": "sklearn/sklearn.model_selection._split/PredefinedSplit/get_n_splits/groups", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.model_selection._split/PredefinedSplit/split/X": { + "target": "sklearn/sklearn.model_selection._split/PredefinedSplit/split/X", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.model_selection._split/PredefinedSplit/split/y": { + "target": "sklearn/sklearn.model_selection._split/PredefinedSplit/split/y", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.model_selection._split/PredefinedSplit/split/groups": { + "target": "sklearn/sklearn.model_selection._split/PredefinedSplit/split/groups", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.model_selection._split/_validate_shuffle_split/default_test_size": { + "target": "sklearn/sklearn.model_selection._split/_validate_shuffle_split/default_test_size", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.model_selection._validation/_score/error_score": { + "target": "sklearn/sklearn.model_selection._validation/_score/error_score", + "defaultType": "string", + "defaultValue": "raise" + }, + "sklearn/sklearn.model_selection._validation/cross_val_predict/pre_dispatch": { + "target": "sklearn/sklearn.model_selection._validation/cross_val_predict/pre_dispatch", + "defaultType": "string", + "defaultValue": "2*n_jobs" + }, + "sklearn/sklearn.model_selection._validation/cross_val_score/pre_dispatch": { + "target": "sklearn/sklearn.model_selection._validation/cross_val_score/pre_dispatch", + "defaultType": "string", + "defaultValue": "2*n_jobs" + }, + "sklearn/sklearn.model_selection._validation/cross_validate/pre_dispatch": { + "target": "sklearn/sklearn.model_selection._validation/cross_validate/pre_dispatch", + "defaultType": "string", + "defaultValue": "2*n_jobs" + }, + "sklearn/sklearn.model_selection._validation/learning_curve/groups": { + "target": "sklearn/sklearn.model_selection._validation/learning_curve/groups", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.model_selection._validation/learning_curve/exploit_incremental_learning": { + "target": "sklearn/sklearn.model_selection._validation/learning_curve/exploit_incremental_learning", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.model_selection._validation/learning_curve/pre_dispatch": { + "target": "sklearn/sklearn.model_selection._validation/learning_curve/pre_dispatch", + "defaultType": "string", + "defaultValue": "all" + }, + "sklearn/sklearn.model_selection._validation/validation_curve/groups": { + "target": "sklearn/sklearn.model_selection._validation/validation_curve/groups", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.model_selection._validation/validation_curve/pre_dispatch": { + "target": "sklearn/sklearn.model_selection._validation/validation_curve/pre_dispatch", + "defaultType": "string", + "defaultValue": "all" + }, + 
"sklearn/sklearn.model_selection._validation/validation_curve/fit_params": { + "target": "sklearn/sklearn.model_selection._validation/validation_curve/fit_params", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.multiclass/OneVsOneClassifier/__init__/n_jobs": { + "target": "sklearn/sklearn.multiclass/OneVsOneClassifier/__init__/n_jobs", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.multiclass/OneVsRestClassifier/__init__/verbose": { + "target": "sklearn/sklearn.multiclass/OneVsRestClassifier/__init__/verbose", + "defaultType": "number", + "defaultValue": 0.0 + }, + "sklearn/sklearn.multioutput/MultiOutputClassifier/fit/sample_weight": { + "target": "sklearn/sklearn.multioutput/MultiOutputClassifier/fit/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.multioutput/_BaseChain/__init__/cv": { + "target": "sklearn/sklearn.multioutput/_BaseChain/__init__/cv", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.multioutput/_MultiOutputEstimator/fit/sample_weight": { + "target": "sklearn/sklearn.multioutput/_MultiOutputEstimator/fit/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.naive_bayes/CategoricalNB/__init__/fit_prior": { + "target": "sklearn/sklearn.naive_bayes/CategoricalNB/__init__/fit_prior", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.naive_bayes/CategoricalNB/__init__/class_prior": { + "target": "sklearn/sklearn.naive_bayes/CategoricalNB/__init__/class_prior", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.naive_bayes/CategoricalNB/__init__/min_categories": { + "target": "sklearn/sklearn.naive_bayes/CategoricalNB/__init__/min_categories", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.naive_bayes/CategoricalNB/fit/sample_weight": { + "target": "sklearn/sklearn.naive_bayes/CategoricalNB/fit/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.naive_bayes/ComplementNB/__init__/fit_prior": { + "target": "sklearn/sklearn.naive_bayes/ComplementNB/__init__/fit_prior", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.naive_bayes/ComplementNB/__init__/class_prior": { + "target": "sklearn/sklearn.naive_bayes/ComplementNB/__init__/class_prior", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.naive_bayes/ComplementNB/__init__/norm": { + "target": "sklearn/sklearn.naive_bayes/ComplementNB/__init__/norm", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.naive_bayes/GaussianNB/partial_fit/sample_weight": { + "target": "sklearn/sklearn.naive_bayes/GaussianNB/partial_fit/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.naive_bayes/_BaseDiscreteNB/partial_fit/sample_weight": { + "target": "sklearn/sklearn.naive_bayes/_BaseDiscreteNB/partial_fit/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.neighbors._base/RadiusNeighborsMixin/radius_neighbors/sort_results": { + "target": "sklearn/sklearn.neighbors._base/RadiusNeighborsMixin/radius_neighbors/sort_results", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.neighbors._base/RadiusNeighborsMixin/radius_neighbors_graph/X": { + "target": "sklearn/sklearn.neighbors._base/RadiusNeighborsMixin/radius_neighbors_graph/X", + "defaultType": "none", + "defaultValue": null + }, + 
"sklearn/sklearn.neighbors._base/RadiusNeighborsMixin/radius_neighbors_graph/radius": { + "target": "sklearn/sklearn.neighbors._base/RadiusNeighborsMixin/radius_neighbors_graph/radius", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.neighbors._base/RadiusNeighborsMixin/radius_neighbors_graph/mode": { + "target": "sklearn/sklearn.neighbors._base/RadiusNeighborsMixin/radius_neighbors_graph/mode", + "defaultType": "string", + "defaultValue": "connectivity" + }, + "sklearn/sklearn.neighbors._base/RadiusNeighborsMixin/radius_neighbors_graph/sort_results": { + "target": "sklearn/sklearn.neighbors._base/RadiusNeighborsMixin/radius_neighbors_graph/sort_results", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.neighbors._classification/KNeighborsClassifier/__init__/metric_params": { + "target": "sklearn/sklearn.neighbors._classification/KNeighborsClassifier/__init__/metric_params", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.neighbors._classification/RadiusNeighborsClassifier/__init__/weights": { + "target": "sklearn/sklearn.neighbors._classification/RadiusNeighborsClassifier/__init__/weights", + "defaultType": "string", + "defaultValue": "uniform" + }, + "sklearn/sklearn.neighbors._classification/RadiusNeighborsClassifier/__init__/algorithm": { + "target": "sklearn/sklearn.neighbors._classification/RadiusNeighborsClassifier/__init__/algorithm", + "defaultType": "string", + "defaultValue": "auto" + }, + "sklearn/sklearn.neighbors._classification/RadiusNeighborsClassifier/__init__/leaf_size": { + "target": "sklearn/sklearn.neighbors._classification/RadiusNeighborsClassifier/__init__/leaf_size", + "defaultType": "number", + "defaultValue": 30.0 + }, + "sklearn/sklearn.neighbors._classification/RadiusNeighborsClassifier/__init__/p": { + "target": "sklearn/sklearn.neighbors._classification/RadiusNeighborsClassifier/__init__/p", + "defaultType": "number", + "defaultValue": 2.0 + }, + "sklearn/sklearn.neighbors._classification/RadiusNeighborsClassifier/__init__/metric": { + "target": "sklearn/sklearn.neighbors._classification/RadiusNeighborsClassifier/__init__/metric", + "defaultType": "string", + "defaultValue": "minkowski" + }, + "sklearn/sklearn.neighbors._classification/RadiusNeighborsClassifier/__init__/outlier_label": { + "target": "sklearn/sklearn.neighbors._classification/RadiusNeighborsClassifier/__init__/outlier_label", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.neighbors._classification/RadiusNeighborsClassifier/__init__/metric_params": { + "target": "sklearn/sklearn.neighbors._classification/RadiusNeighborsClassifier/__init__/metric_params", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.neighbors._classification/RadiusNeighborsClassifier/__init__/n_jobs": { + "target": "sklearn/sklearn.neighbors._classification/RadiusNeighborsClassifier/__init__/n_jobs", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.neighbors._graph/kneighbors_graph/mode": { + "target": "sklearn/sklearn.neighbors._graph/kneighbors_graph/mode", + "defaultType": "string", + "defaultValue": "connectivity" + }, + "sklearn/sklearn.neighbors._graph/kneighbors_graph/metric": { + "target": "sklearn/sklearn.neighbors._graph/kneighbors_graph/metric", + "defaultType": "string", + "defaultValue": "minkowski" + }, + "sklearn/sklearn.neighbors._graph/kneighbors_graph/p": { + "target": "sklearn/sklearn.neighbors._graph/kneighbors_graph/p", + "defaultType": "number", + 
"defaultValue": 2.0 + }, + "sklearn/sklearn.neighbors._graph/kneighbors_graph/metric_params": { + "target": "sklearn/sklearn.neighbors._graph/kneighbors_graph/metric_params", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.neighbors._graph/kneighbors_graph/include_self": { + "target": "sklearn/sklearn.neighbors._graph/kneighbors_graph/include_self", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.neighbors._graph/kneighbors_graph/n_jobs": { + "target": "sklearn/sklearn.neighbors._graph/kneighbors_graph/n_jobs", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.neighbors._kde/KernelDensity/__init__/algorithm": { + "target": "sklearn/sklearn.neighbors._kde/KernelDensity/__init__/algorithm", + "defaultType": "string", + "defaultValue": "auto" + }, + "sklearn/sklearn.neighbors._kde/KernelDensity/__init__/atol": { + "target": "sklearn/sklearn.neighbors._kde/KernelDensity/__init__/atol", + "defaultType": "number", + "defaultValue": 0.0 + }, + "sklearn/sklearn.neighbors._kde/KernelDensity/__init__/rtol": { + "target": "sklearn/sklearn.neighbors._kde/KernelDensity/__init__/rtol", + "defaultType": "number", + "defaultValue": 0.0 + }, + "sklearn/sklearn.neighbors._kde/KernelDensity/__init__/breadth_first": { + "target": "sklearn/sklearn.neighbors._kde/KernelDensity/__init__/breadth_first", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.neighbors._kde/KernelDensity/__init__/leaf_size": { + "target": "sklearn/sklearn.neighbors._kde/KernelDensity/__init__/leaf_size", + "defaultType": "number", + "defaultValue": 40.0 + }, + "sklearn/sklearn.neighbors._kde/KernelDensity/__init__/metric_params": { + "target": "sklearn/sklearn.neighbors._kde/KernelDensity/__init__/metric_params", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.neighbors._kde/KernelDensity/fit/y": { + "target": "sklearn/sklearn.neighbors._kde/KernelDensity/fit/y", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.neighbors._kde/KernelDensity/fit/sample_weight": { + "target": "sklearn/sklearn.neighbors._kde/KernelDensity/fit/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.neighbors._kde/KernelDensity/sample/n_samples": { + "target": "sklearn/sklearn.neighbors._kde/KernelDensity/sample/n_samples", + "defaultType": "number", + "defaultValue": 100000.0 + }, + "sklearn/sklearn.neighbors._kde/KernelDensity/sample/random_state": { + "target": "sklearn/sklearn.neighbors._kde/KernelDensity/sample/random_state", + "defaultType": "number", + "defaultValue": 42.0 + }, + "sklearn/sklearn.neighbors._lof/LocalOutlierFactor/__init__/algorithm": { + "target": "sklearn/sklearn.neighbors._lof/LocalOutlierFactor/__init__/algorithm", + "defaultType": "string", + "defaultValue": "auto" + }, + "sklearn/sklearn.neighbors._lof/LocalOutlierFactor/__init__/leaf_size": { + "target": "sklearn/sklearn.neighbors._lof/LocalOutlierFactor/__init__/leaf_size", + "defaultType": "number", + "defaultValue": 30.0 + }, + "sklearn/sklearn.neighbors._lof/LocalOutlierFactor/__init__/metric_params": { + "target": "sklearn/sklearn.neighbors._lof/LocalOutlierFactor/__init__/metric_params", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.neighbors._lof/LocalOutlierFactor/__init__/novelty": { + "target": "sklearn/sklearn.neighbors._lof/LocalOutlierFactor/__init__/novelty", + "defaultType": "boolean", + "defaultValue": false + }, + 
"sklearn/sklearn.neighbors._lof/LocalOutlierFactor/fit/y": { + "target": "sklearn/sklearn.neighbors._lof/LocalOutlierFactor/fit/y", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.neighbors._nca/NeighborhoodComponentsAnalysis/__init__/n_components": { + "target": "sklearn/sklearn.neighbors._nca/NeighborhoodComponentsAnalysis/__init__/n_components", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.neighbors._nca/NeighborhoodComponentsAnalysis/__init__/init": { + "target": "sklearn/sklearn.neighbors._nca/NeighborhoodComponentsAnalysis/__init__/init", + "defaultType": "string", + "defaultValue": "auto" + }, + "sklearn/sklearn.neighbors._nca/NeighborhoodComponentsAnalysis/__init__/warm_start": { + "target": "sklearn/sklearn.neighbors._nca/NeighborhoodComponentsAnalysis/__init__/warm_start", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.neighbors._nca/NeighborhoodComponentsAnalysis/__init__/max_iter": { + "target": "sklearn/sklearn.neighbors._nca/NeighborhoodComponentsAnalysis/__init__/max_iter", + "defaultType": "number", + "defaultValue": 50.0 + }, + "sklearn/sklearn.neighbors._nca/NeighborhoodComponentsAnalysis/__init__/tol": { + "target": "sklearn/sklearn.neighbors._nca/NeighborhoodComponentsAnalysis/__init__/tol", + "defaultType": "number", + "defaultValue": 1e-5 + }, + "sklearn/sklearn.neighbors._nca/NeighborhoodComponentsAnalysis/__init__/callback": { + "target": "sklearn/sklearn.neighbors._nca/NeighborhoodComponentsAnalysis/__init__/callback", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.neighbors._nca/NeighborhoodComponentsAnalysis/__init__/verbose": { + "target": "sklearn/sklearn.neighbors._nca/NeighborhoodComponentsAnalysis/__init__/verbose", + "defaultType": "number", + "defaultValue": 0.0 + }, + "sklearn/sklearn.neighbors._nca/NeighborhoodComponentsAnalysis/__init__/random_state": { + "target": "sklearn/sklearn.neighbors._nca/NeighborhoodComponentsAnalysis/__init__/random_state", + "defaultType": "number", + "defaultValue": 42.0 + }, + "sklearn/sklearn.neighbors._nearest_centroid/NearestCentroid/__init__/metric": { + "target": "sklearn/sklearn.neighbors._nearest_centroid/NearestCentroid/__init__/metric", + "defaultType": "string", + "defaultValue": "euclidean" + }, + "sklearn/sklearn.neighbors._nearest_centroid/NearestCentroid/__init__/shrink_threshold": { + "target": "sklearn/sklearn.neighbors._nearest_centroid/NearestCentroid/__init__/shrink_threshold", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.neighbors._regression/RadiusNeighborsRegressor/__init__/algorithm": { + "target": "sklearn/sklearn.neighbors._regression/RadiusNeighborsRegressor/__init__/algorithm", + "defaultType": "string", + "defaultValue": "auto" + }, + "sklearn/sklearn.neighbors._regression/RadiusNeighborsRegressor/__init__/leaf_size": { + "target": "sklearn/sklearn.neighbors._regression/RadiusNeighborsRegressor/__init__/leaf_size", + "defaultType": "number", + "defaultValue": 30.0 + }, + "sklearn/sklearn.neighbors._regression/RadiusNeighborsRegressor/__init__/metric_params": { + "target": "sklearn/sklearn.neighbors._regression/RadiusNeighborsRegressor/__init__/metric_params", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.neighbors._regression/RadiusNeighborsRegressor/__init__/n_jobs": { + "target": "sklearn/sklearn.neighbors._regression/RadiusNeighborsRegressor/__init__/n_jobs", + "defaultType": "none", + "defaultValue": null + }, + 
"sklearn/sklearn.neighbors._unsupervised/NearestNeighbors/__init__/leaf_size": { + "target": "sklearn/sklearn.neighbors._unsupervised/NearestNeighbors/__init__/leaf_size", + "defaultType": "number", + "defaultValue": 30.0 + }, + "sklearn/sklearn.neighbors._unsupervised/NearestNeighbors/__init__/p": { + "target": "sklearn/sklearn.neighbors._unsupervised/NearestNeighbors/__init__/p", + "defaultType": "number", + "defaultValue": 2.0 + }, + "sklearn/sklearn.neighbors._unsupervised/NearestNeighbors/__init__/metric_params": { + "target": "sklearn/sklearn.neighbors._unsupervised/NearestNeighbors/__init__/metric_params", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.neural_network._multilayer_perceptron/MLPClassifier/__init__/power_t": { + "target": "sklearn/sklearn.neural_network._multilayer_perceptron/MLPClassifier/__init__/power_t", + "defaultType": "number", + "defaultValue": 0.5 + }, + "sklearn/sklearn.neural_network._multilayer_perceptron/MLPClassifier/__init__/shuffle": { + "target": "sklearn/sklearn.neural_network._multilayer_perceptron/MLPClassifier/__init__/shuffle", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.neural_network._multilayer_perceptron/MLPClassifier/__init__/momentum": { + "target": "sklearn/sklearn.neural_network._multilayer_perceptron/MLPClassifier/__init__/momentum", + "defaultType": "number", + "defaultValue": 0.9 + }, + "sklearn/sklearn.neural_network._multilayer_perceptron/MLPClassifier/__init__/nesterovs_momentum": { + "target": "sklearn/sklearn.neural_network._multilayer_perceptron/MLPClassifier/__init__/nesterovs_momentum", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.neural_network._multilayer_perceptron/MLPClassifier/__init__/max_fun": { + "target": "sklearn/sklearn.neural_network._multilayer_perceptron/MLPClassifier/__init__/max_fun", + "defaultType": "number", + "defaultValue": 15000.0 + }, + "sklearn/sklearn.neural_network._multilayer_perceptron/MLPRegressor/__init__/power_t": { + "target": "sklearn/sklearn.neural_network._multilayer_perceptron/MLPRegressor/__init__/power_t", + "defaultType": "number", + "defaultValue": 0.5 + }, + "sklearn/sklearn.neural_network._multilayer_perceptron/MLPRegressor/__init__/nesterovs_momentum": { + "target": "sklearn/sklearn.neural_network._multilayer_perceptron/MLPRegressor/__init__/nesterovs_momentum", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.neural_network._multilayer_perceptron/MLPRegressor/__init__/beta_1": { + "target": "sklearn/sklearn.neural_network._multilayer_perceptron/MLPRegressor/__init__/beta_1", + "defaultType": "number", + "defaultValue": 0.9 + }, + "sklearn/sklearn.neural_network._multilayer_perceptron/MLPRegressor/__init__/beta_2": { + "target": "sklearn/sklearn.neural_network._multilayer_perceptron/MLPRegressor/__init__/beta_2", + "defaultType": "number", + "defaultValue": 0.999 + }, + "sklearn/sklearn.neural_network._multilayer_perceptron/MLPRegressor/__init__/epsilon": { + "target": "sklearn/sklearn.neural_network._multilayer_perceptron/MLPRegressor/__init__/epsilon", + "defaultType": "number", + "defaultValue": 1e-8 + }, + "sklearn/sklearn.neural_network._rbm/BernoulliRBM/__init__/n_components": { + "target": "sklearn/sklearn.neural_network._rbm/BernoulliRBM/__init__/n_components", + "defaultType": "number", + "defaultValue": 2.0 + }, + "sklearn/sklearn.neural_network._rbm/BernoulliRBM/__init__/learning_rate": { + "target": 
"sklearn/sklearn.neural_network._rbm/BernoulliRBM/__init__/learning_rate", + "defaultType": "number", + "defaultValue": 0.1 + }, + "sklearn/sklearn.neural_network._rbm/BernoulliRBM/__init__/batch_size": { + "target": "sklearn/sklearn.neural_network._rbm/BernoulliRBM/__init__/batch_size", + "defaultType": "number", + "defaultValue": 10.0 + }, + "sklearn/sklearn.neural_network._rbm/BernoulliRBM/__init__/n_iter": { + "target": "sklearn/sklearn.neural_network._rbm/BernoulliRBM/__init__/n_iter", + "defaultType": "number", + "defaultValue": 10.0 + }, + "sklearn/sklearn.neural_network._rbm/BernoulliRBM/__init__/verbose": { + "target": "sklearn/sklearn.neural_network._rbm/BernoulliRBM/__init__/verbose", + "defaultType": "number", + "defaultValue": 0.0 + }, + "sklearn/sklearn.neural_network._rbm/BernoulliRBM/fit/y": { + "target": "sklearn/sklearn.neural_network._rbm/BernoulliRBM/fit/y", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.pipeline/Pipeline/_iter/filter_passthrough": { + "target": "sklearn/sklearn.pipeline/Pipeline/_iter/filter_passthrough", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.pipeline/Pipeline/get_params/deep": { + "target": "sklearn/sklearn.pipeline/Pipeline/get_params/deep", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.pipeline/Pipeline/score/sample_weight": { + "target": "sklearn/sklearn.pipeline/Pipeline/score/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.pipeline/make_pipeline/memory": { + "target": "sklearn/sklearn.pipeline/make_pipeline/memory", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.pipeline/make_union/verbose": { + "target": "sklearn/sklearn.pipeline/make_union/verbose", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.preprocessing._data/Binarizer/__init__/copy": { + "target": "sklearn/sklearn.preprocessing._data/Binarizer/__init__/copy", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.preprocessing._data/Binarizer/fit/y": { + "target": "sklearn/sklearn.preprocessing._data/Binarizer/fit/y", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.preprocessing._data/Binarizer/transform/copy": { + "target": "sklearn/sklearn.preprocessing._data/Binarizer/transform/copy", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.preprocessing._data/KernelCenterer/transform/copy": { + "target": "sklearn/sklearn.preprocessing._data/KernelCenterer/transform/copy", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.preprocessing._data/MaxAbsScaler/fit/y": { + "target": "sklearn/sklearn.preprocessing._data/MaxAbsScaler/fit/y", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.preprocessing._data/MinMaxScaler/__init__/clip": { + "target": "sklearn/sklearn.preprocessing._data/MinMaxScaler/__init__/clip", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.preprocessing._data/MinMaxScaler/partial_fit/y": { + "target": "sklearn/sklearn.preprocessing._data/MinMaxScaler/partial_fit/y", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.preprocessing._data/Normalizer/fit/y": { + "target": "sklearn/sklearn.preprocessing._data/Normalizer/fit/y", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.preprocessing._data/Normalizer/transform/copy": { + "target": "sklearn/sklearn.preprocessing._data/Normalizer/transform/copy", + "defaultType": 
"none", + "defaultValue": null + }, + "sklearn/sklearn.preprocessing._data/PowerTransformer/__init__/standardize": { + "target": "sklearn/sklearn.preprocessing._data/PowerTransformer/__init__/standardize", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.preprocessing._data/PowerTransformer/fit/y": { + "target": "sklearn/sklearn.preprocessing._data/PowerTransformer/fit/y", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.preprocessing._data/PowerTransformer/fit_transform/y": { + "target": "sklearn/sklearn.preprocessing._data/PowerTransformer/fit_transform/y", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.preprocessing._data/QuantileTransformer/__init__/ignore_implicit_zeros": { + "target": "sklearn/sklearn.preprocessing._data/QuantileTransformer/__init__/ignore_implicit_zeros", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.preprocessing._data/QuantileTransformer/fit/y": { + "target": "sklearn/sklearn.preprocessing._data/QuantileTransformer/fit/y", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.preprocessing._data/RobustScaler/fit/y": { + "target": "sklearn/sklearn.preprocessing._data/RobustScaler/fit/y", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.preprocessing._data/StandardScaler/fit/sample_weight": { + "target": "sklearn/sklearn.preprocessing._data/StandardScaler/fit/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.preprocessing._data/StandardScaler/inverse_transform/copy": { + "target": "sklearn/sklearn.preprocessing._data/StandardScaler/inverse_transform/copy", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.preprocessing._data/StandardScaler/partial_fit/y": { + "target": "sklearn/sklearn.preprocessing._data/StandardScaler/partial_fit/y", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.preprocessing._data/StandardScaler/partial_fit/sample_weight": { + "target": "sklearn/sklearn.preprocessing._data/StandardScaler/partial_fit/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.preprocessing._data/StandardScaler/transform/copy": { + "target": "sklearn/sklearn.preprocessing._data/StandardScaler/transform/copy", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.preprocessing._data/binarize/copy": { + "target": "sklearn/sklearn.preprocessing._data/binarize/copy", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.preprocessing._data/maxabs_scale/axis": { + "target": "sklearn/sklearn.preprocessing._data/maxabs_scale/axis", + "defaultType": "number", + "defaultValue": 0.0 + }, + "sklearn/sklearn.preprocessing._data/maxabs_scale/copy": { + "target": "sklearn/sklearn.preprocessing._data/maxabs_scale/copy", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.preprocessing._data/power_transform/method": { + "target": "sklearn/sklearn.preprocessing._data/power_transform/method", + "defaultType": "string", + "defaultValue": "yeo-johnson" + }, + "sklearn/sklearn.preprocessing._data/power_transform/standardize": { + "target": "sklearn/sklearn.preprocessing._data/power_transform/standardize", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.preprocessing._data/power_transform/copy": { + "target": "sklearn/sklearn.preprocessing._data/power_transform/copy", + "defaultType": "boolean", + "defaultValue": true + }, + 
"sklearn/sklearn.preprocessing._data/quantile_transform/axis": { + "target": "sklearn/sklearn.preprocessing._data/quantile_transform/axis", + "defaultType": "number", + "defaultValue": 0.0 + }, + "sklearn/sklearn.preprocessing._data/quantile_transform/ignore_implicit_zeros": { + "target": "sklearn/sklearn.preprocessing._data/quantile_transform/ignore_implicit_zeros", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.preprocessing._data/quantile_transform/copy": { + "target": "sklearn/sklearn.preprocessing._data/quantile_transform/copy", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.preprocessing._data/robust_scale/axis": { + "target": "sklearn/sklearn.preprocessing._data/robust_scale/axis", + "defaultType": "number", + "defaultValue": 0.0 + }, + "sklearn/sklearn.preprocessing._data/robust_scale/with_centering": { + "target": "sklearn/sklearn.preprocessing._data/robust_scale/with_centering", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.preprocessing._data/robust_scale/with_scaling": { + "target": "sklearn/sklearn.preprocessing._data/robust_scale/with_scaling", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.preprocessing._data/robust_scale/copy": { + "target": "sklearn/sklearn.preprocessing._data/robust_scale/copy", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.preprocessing._data/robust_scale/unit_variance": { + "target": "sklearn/sklearn.preprocessing._data/robust_scale/unit_variance", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.preprocessing._data/scale/copy": { + "target": "sklearn/sklearn.preprocessing._data/scale/copy", "defaultType": "boolean", "defaultValue": true }, - "sklearn/sklearn.metrics.pairwise/pairwise_distances_argmin/axis": { - "target": "sklearn/sklearn.metrics.pairwise/pairwise_distances_argmin/axis", - "defaultType": "number", - "defaultValue": 1.0 - }, - "sklearn/sklearn.metrics.pairwise/pairwise_distances_argmin/metric": { - "target": "sklearn/sklearn.metrics.pairwise/pairwise_distances_argmin/metric", - "defaultType": "string", - "defaultValue": "euclidean" - }, - "sklearn/sklearn.metrics.pairwise/pairwise_distances_argmin/metric_kwargs": { - "target": "sklearn/sklearn.metrics.pairwise/pairwise_distances_argmin/metric_kwargs", + "sklearn/sklearn.preprocessing._discretization/KBinsDiscretizer/__init__/dtype": { + "target": "sklearn/sklearn.preprocessing._discretization/KBinsDiscretizer/__init__/dtype", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.metrics.pairwise/pairwise_distances_argmin_min/axis": { - "target": "sklearn/sklearn.metrics.pairwise/pairwise_distances_argmin_min/axis", - "defaultType": "number", - "defaultValue": 1.0 - }, - "sklearn/sklearn.metrics.pairwise/pairwise_distances_argmin_min/metric": { - "target": "sklearn/sklearn.metrics.pairwise/pairwise_distances_argmin_min/metric", + "sklearn/sklearn.preprocessing._discretization/KBinsDiscretizer/__init__/subsample": { + "target": "sklearn/sklearn.preprocessing._discretization/KBinsDiscretizer/__init__/subsample", "defaultType": "string", - "defaultValue": "euclidean" + "defaultValue": "warn" }, - "sklearn/sklearn.metrics.pairwise/pairwise_distances_argmin_min/metric_kwargs": { - "target": "sklearn/sklearn.metrics.pairwise/pairwise_distances_argmin_min/metric_kwargs", + "sklearn/sklearn.preprocessing._discretization/KBinsDiscretizer/__init__/random_state": { + "target": 
"sklearn/sklearn.preprocessing._discretization/KBinsDiscretizer/__init__/random_state", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.metrics.pairwise/pairwise_distances_chunked/Y": { - "target": "sklearn/sklearn.metrics.pairwise/pairwise_distances_chunked/Y", + "sklearn/sklearn.preprocessing._discretization/KBinsDiscretizer/fit/y": { + "target": "sklearn/sklearn.preprocessing._discretization/KBinsDiscretizer/fit/y", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.metrics.pairwise/pairwise_distances_chunked/metric": { - "target": "sklearn/sklearn.metrics.pairwise/pairwise_distances_chunked/metric", - "defaultType": "string", - "defaultValue": "cosine" - }, - "sklearn/sklearn.metrics.pairwise/pairwise_distances_chunked/n_jobs": { - "target": "sklearn/sklearn.metrics.pairwise/pairwise_distances_chunked/n_jobs", - "defaultType": "number", - "defaultValue": -1.0 - }, - "sklearn/sklearn.metrics.pairwise/pairwise_distances_chunked/working_memory": { - "target": "sklearn/sklearn.metrics.pairwise/pairwise_distances_chunked/working_memory", + "sklearn/sklearn.preprocessing._encoders/OneHotEncoder/__init__/min_frequency": { + "target": "sklearn/sklearn.preprocessing._encoders/OneHotEncoder/__init__/min_frequency", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.metrics.pairwise/pairwise_kernels/Y": { - "target": "sklearn/sklearn.metrics.pairwise/pairwise_kernels/Y", + "sklearn/sklearn.preprocessing._encoders/OneHotEncoder/__init__/max_categories": { + "target": "sklearn/sklearn.preprocessing._encoders/OneHotEncoder/__init__/max_categories", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.metrics.pairwise/pairwise_kernels/metric": { - "target": "sklearn/sklearn.metrics.pairwise/pairwise_kernels/metric", - "defaultType": "string", - "defaultValue": "rbf" - }, - "sklearn/sklearn.metrics.pairwise/pairwise_kernels/filter_params": { - "target": "sklearn/sklearn.metrics.pairwise/pairwise_kernels/filter_params", - "defaultType": "boolean", - "defaultValue": true - }, - "sklearn/sklearn.metrics.pairwise/pairwise_kernels/n_jobs": { - "target": "sklearn/sklearn.metrics.pairwise/pairwise_kernels/n_jobs", + "sklearn/sklearn.preprocessing._encoders/OneHotEncoder/fit/y": { + "target": "sklearn/sklearn.preprocessing._encoders/OneHotEncoder/fit/y", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.metrics.pairwise/polynomial_kernel/degree": { - "target": "sklearn/sklearn.metrics.pairwise/polynomial_kernel/degree", - "defaultType": "number", - "defaultValue": 3.0 - }, - "sklearn/sklearn.metrics.pairwise/polynomial_kernel/gamma": { - "target": "sklearn/sklearn.metrics.pairwise/polynomial_kernel/gamma", + "sklearn/sklearn.preprocessing._encoders/OrdinalEncoder/fit/y": { + "target": "sklearn/sklearn.preprocessing._encoders/OrdinalEncoder/fit/y", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.metrics.pairwise/polynomial_kernel/coef0": { - "target": "sklearn/sklearn.metrics.pairwise/polynomial_kernel/coef0", - "defaultType": "number", - "defaultValue": 1.0 - }, - "sklearn/sklearn.metrics.pairwise/rbf_kernel/gamma": { - "target": "sklearn/sklearn.metrics.pairwise/rbf_kernel/gamma", + "sklearn/sklearn.preprocessing._function_transformer/FunctionTransformer/__init__/feature_names_out": { + "target": "sklearn/sklearn.preprocessing._function_transformer/FunctionTransformer/__init__/feature_names_out", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.metrics.pairwise/sigmoid_kernel/gamma": { - "target": 
"sklearn/sklearn.metrics.pairwise/sigmoid_kernel/gamma", + "sklearn/sklearn.preprocessing._function_transformer/FunctionTransformer/__init__/inv_kw_args": { + "target": "sklearn/sklearn.preprocessing._function_transformer/FunctionTransformer/__init__/inv_kw_args", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.metrics.pairwise/sigmoid_kernel/coef0": { - "target": "sklearn/sklearn.metrics.pairwise/sigmoid_kernel/coef0", + "sklearn/sklearn.preprocessing._label/LabelBinarizer/__init__/neg_label": { + "target": "sklearn/sklearn.preprocessing._label/LabelBinarizer/__init__/neg_label", + "defaultType": "number", + "defaultValue": 0.0 + }, + "sklearn/sklearn.preprocessing._label/LabelBinarizer/__init__/pos_label": { + "target": "sklearn/sklearn.preprocessing._label/LabelBinarizer/__init__/pos_label", "defaultType": "number", "defaultValue": 1.0 }, - "sklearn/sklearn.model_selection._validation/cross_val_predict/pre_dispatch": { - "target": "sklearn/sklearn.model_selection._validation/cross_val_predict/pre_dispatch", - "defaultType": "string", - "defaultValue": "2*n_jobs" + "sklearn/sklearn.preprocessing._label/label_binarize/neg_label": { + "target": "sklearn/sklearn.preprocessing._label/label_binarize/neg_label", + "defaultType": "number", + "defaultValue": 0.0 }, - "sklearn/sklearn.model_selection._validation/cross_val_score/pre_dispatch": { - "target": "sklearn/sklearn.model_selection._validation/cross_val_score/pre_dispatch", - "defaultType": "string", - "defaultValue": "2*n_jobs" + "sklearn/sklearn.preprocessing._label/label_binarize/pos_label": { + "target": "sklearn/sklearn.preprocessing._label/label_binarize/pos_label", + "defaultType": "number", + "defaultValue": 1.0 }, - "sklearn/sklearn.model_selection._validation/cross_validate/pre_dispatch": { - "target": "sklearn/sklearn.model_selection._validation/cross_validate/pre_dispatch", + "sklearn/sklearn.preprocessing._label/label_binarize/sparse_output": { + "target": "sklearn/sklearn.preprocessing._label/label_binarize/sparse_output", + "defaultType": "boolean", + "defaultValue": false + }, + "sklearn/sklearn.preprocessing._polynomial/PolynomialFeatures/__init__/order": { + "target": "sklearn/sklearn.preprocessing._polynomial/PolynomialFeatures/__init__/order", "defaultType": "string", - "defaultValue": "2*n_jobs" + "defaultValue": "C" }, - "sklearn/sklearn.model_selection._validation/learning_curve/groups": { - "target": "sklearn/sklearn.model_selection._validation/learning_curve/groups", + "sklearn/sklearn.random_projection/BaseRandomProjection/fit/y": { + "target": "sklearn/sklearn.random_projection/BaseRandomProjection/fit/y", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.model_selection._validation/learning_curve/exploit_incremental_learning": { - "target": "sklearn/sklearn.model_selection._validation/learning_curve/exploit_incremental_learning", + "sklearn/sklearn.random_projection/GaussianRandomProjection/__init__/compute_inverse_components": { + "target": "sklearn/sklearn.random_projection/GaussianRandomProjection/__init__/compute_inverse_components", "defaultType": "boolean", "defaultValue": false }, - "sklearn/sklearn.model_selection._validation/learning_curve/pre_dispatch": { - "target": "sklearn/sklearn.model_selection._validation/learning_curve/pre_dispatch", + "sklearn/sklearn.random_projection/SparseRandomProjection/__init__/density": { + "target": "sklearn/sklearn.random_projection/SparseRandomProjection/__init__/density", "defaultType": "string", - "defaultValue": "all" + 
"defaultValue": "auto" }, - "sklearn/sklearn.model_selection._validation/validation_curve/groups": { - "target": "sklearn/sklearn.model_selection._validation/validation_curve/groups", - "defaultType": "none", - "defaultValue": null + "sklearn/sklearn.random_projection/SparseRandomProjection/__init__/compute_inverse_components": { + "target": "sklearn/sklearn.random_projection/SparseRandomProjection/__init__/compute_inverse_components", + "defaultType": "boolean", + "defaultValue": false }, - "sklearn/sklearn.model_selection._validation/validation_curve/pre_dispatch": { - "target": "sklearn/sklearn.model_selection._validation/validation_curve/pre_dispatch", + "sklearn/sklearn.semi_supervised._label_propagation/LabelPropagation/__init__/kernel": { + "target": "sklearn/sklearn.semi_supervised._label_propagation/LabelPropagation/__init__/kernel", "defaultType": "string", - "defaultValue": "all" + "defaultValue": "rbf" }, - "sklearn/sklearn.model_selection._validation/validation_curve/fit_params": { - "target": "sklearn/sklearn.model_selection._validation/validation_curve/fit_params", - "defaultType": "none", - "defaultValue": null + "sklearn/sklearn.semi_supervised._label_propagation/LabelPropagation/__init__/gamma": { + "target": "sklearn/sklearn.semi_supervised._label_propagation/LabelPropagation/__init__/gamma", + "defaultType": "number", + "defaultValue": 20.0 }, - "sklearn/sklearn.multiclass/OneVsOneClassifier/__init__/n_jobs": { - "target": "sklearn/sklearn.multiclass/OneVsOneClassifier/__init__/n_jobs", - "defaultType": "none", - "defaultValue": null + "sklearn/sklearn.semi_supervised._label_propagation/LabelPropagation/__init__/n_neighbors": { + "target": "sklearn/sklearn.semi_supervised._label_propagation/LabelPropagation/__init__/n_neighbors", + "defaultType": "number", + "defaultValue": 7.0 }, - "sklearn/sklearn.multiclass/OneVsRestClassifier/__init__/verbose": { - "target": "sklearn/sklearn.multiclass/OneVsRestClassifier/__init__/verbose", + "sklearn/sklearn.semi_supervised._label_propagation/LabelPropagation/__init__/max_iter": { + "target": "sklearn/sklearn.semi_supervised._label_propagation/LabelPropagation/__init__/max_iter", "defaultType": "number", - "defaultValue": 0.0 + "defaultValue": 1000.0 }, - "sklearn/sklearn.multioutput/MultiOutputClassifier/fit/sample_weight": { - "target": "sklearn/sklearn.multioutput/MultiOutputClassifier/fit/sample_weight", + "sklearn/sklearn.semi_supervised._label_propagation/LabelPropagation/__init__/tol": { + "target": "sklearn/sklearn.semi_supervised._label_propagation/LabelPropagation/__init__/tol", + "defaultType": "number", + "defaultValue": 0.001 + }, + "sklearn/sklearn.semi_supervised._label_propagation/LabelPropagation/__init__/n_jobs": { + "target": "sklearn/sklearn.semi_supervised._label_propagation/LabelPropagation/__init__/n_jobs", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.naive_bayes/CategoricalNB/__init__/fit_prior": { - "target": "sklearn/sklearn.naive_bayes/CategoricalNB/__init__/fit_prior", - "defaultType": "boolean", - "defaultValue": true + "sklearn/sklearn.semi_supervised._label_propagation/LabelSpreading/__init__/tol": { + "target": "sklearn/sklearn.semi_supervised._label_propagation/LabelSpreading/__init__/tol", + "defaultType": "number", + "defaultValue": 0.001 }, - "sklearn/sklearn.naive_bayes/CategoricalNB/__init__/class_prior": { - "target": "sklearn/sklearn.naive_bayes/CategoricalNB/__init__/class_prior", + "sklearn/sklearn.svm._base/BaseLibSVM/fit/sample_weight": { + "target": 
"sklearn/sklearn.svm._base/BaseLibSVM/fit/sample_weight", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.naive_bayes/CategoricalNB/__init__/min_categories": { - "target": "sklearn/sklearn.naive_bayes/CategoricalNB/__init__/min_categories", - "defaultType": "none", - "defaultValue": null + "sklearn/sklearn.svm._classes/LinearSVC/__init__/intercept_scaling": { + "target": "sklearn/sklearn.svm._classes/LinearSVC/__init__/intercept_scaling", + "defaultType": "number", + "defaultValue": 1.0 }, - "sklearn/sklearn.naive_bayes/CategoricalNB/fit/sample_weight": { - "target": "sklearn/sklearn.naive_bayes/CategoricalNB/fit/sample_weight", + "sklearn/sklearn.svm._classes/LinearSVC/fit/sample_weight": { + "target": "sklearn/sklearn.svm._classes/LinearSVC/fit/sample_weight", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.naive_bayes/ComplementNB/__init__/fit_prior": { - "target": "sklearn/sklearn.naive_bayes/ComplementNB/__init__/fit_prior", + "sklearn/sklearn.svm._classes/LinearSVR/__init__/dual": { + "target": "sklearn/sklearn.svm._classes/LinearSVR/__init__/dual", "defaultType": "boolean", "defaultValue": true }, - "sklearn/sklearn.naive_bayes/ComplementNB/__init__/class_prior": { - "target": "sklearn/sklearn.naive_bayes/ComplementNB/__init__/class_prior", + "sklearn/sklearn.svm._classes/LinearSVR/fit/sample_weight": { + "target": "sklearn/sklearn.svm._classes/LinearSVR/fit/sample_weight", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.naive_bayes/ComplementNB/__init__/norm": { - "target": "sklearn/sklearn.naive_bayes/ComplementNB/__init__/norm", + "sklearn/sklearn.svm._classes/NuSVC/__init__/shrinking": { + "target": "sklearn/sklearn.svm._classes/NuSVC/__init__/shrinking", "defaultType": "boolean", - "defaultValue": false - }, - "sklearn/sklearn.naive_bayes/GaussianNB/partial_fit/sample_weight": { - "target": "sklearn/sklearn.naive_bayes/GaussianNB/partial_fit/sample_weight", - "defaultType": "none", - "defaultValue": null - }, - "sklearn/sklearn.neighbors._graph/kneighbors_graph/mode": { - "target": "sklearn/sklearn.neighbors._graph/kneighbors_graph/mode", - "defaultType": "string", - "defaultValue": "connectivity" - }, - "sklearn/sklearn.neighbors._graph/kneighbors_graph/metric": { - "target": "sklearn/sklearn.neighbors._graph/kneighbors_graph/metric", - "defaultType": "string", - "defaultValue": "minkowski" - }, - "sklearn/sklearn.neighbors._graph/kneighbors_graph/p": { - "target": "sklearn/sklearn.neighbors._graph/kneighbors_graph/p", - "defaultType": "number", - "defaultValue": 2.0 + "defaultValue": true }, - "sklearn/sklearn.neighbors._graph/kneighbors_graph/metric_params": { - "target": "sklearn/sklearn.neighbors._graph/kneighbors_graph/metric_params", + "sklearn/sklearn.svm._classes/NuSVC/__init__/class_weight": { + "target": "sklearn/sklearn.svm._classes/NuSVC/__init__/class_weight", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.neighbors._graph/kneighbors_graph/include_self": { - "target": "sklearn/sklearn.neighbors._graph/kneighbors_graph/include_self", + "sklearn/sklearn.svm._classes/NuSVC/__init__/verbose": { + "target": "sklearn/sklearn.svm._classes/NuSVC/__init__/verbose", "defaultType": "boolean", "defaultValue": false }, - "sklearn/sklearn.neighbors._graph/kneighbors_graph/n_jobs": { - "target": "sklearn/sklearn.neighbors._graph/kneighbors_graph/n_jobs", - "defaultType": "none", - "defaultValue": null - }, - "sklearn/sklearn.pipeline/Pipeline/get_params/deep": { - "target": 
"sklearn/sklearn.pipeline/Pipeline/get_params/deep", - "defaultType": "boolean", - "defaultValue": true - }, - "sklearn/sklearn.pipeline/Pipeline/score/sample_weight": { - "target": "sklearn/sklearn.pipeline/Pipeline/score/sample_weight", - "defaultType": "none", - "defaultValue": null - }, - "sklearn/sklearn.pipeline/make_pipeline/memory": { - "target": "sklearn/sklearn.pipeline/make_pipeline/memory", - "defaultType": "none", - "defaultValue": null + "sklearn/sklearn.svm._classes/NuSVC/__init__/decision_function_shape": { + "target": "sklearn/sklearn.svm._classes/NuSVC/__init__/decision_function_shape", + "defaultType": "string", + "defaultValue": "ovr" }, - "sklearn/sklearn.pipeline/make_union/verbose": { - "target": "sklearn/sklearn.pipeline/make_union/verbose", + "sklearn/sklearn.svm._classes/NuSVC/__init__/break_ties": { + "target": "sklearn/sklearn.svm._classes/NuSVC/__init__/break_ties", "defaultType": "boolean", "defaultValue": false }, - "sklearn/sklearn.preprocessing._data/binarize/copy": { - "target": "sklearn/sklearn.preprocessing._data/binarize/copy", - "defaultType": "boolean", - "defaultValue": true + "sklearn/sklearn.svm._classes/NuSVR/__init__/degree": { + "target": "sklearn/sklearn.svm._classes/NuSVR/__init__/degree", + "defaultType": "number", + "defaultValue": 3.0 }, - "sklearn/sklearn.preprocessing._data/maxabs_scale/axis": { - "target": "sklearn/sklearn.preprocessing._data/maxabs_scale/axis", + "sklearn/sklearn.svm._classes/NuSVR/__init__/coef0": { + "target": "sklearn/sklearn.svm._classes/NuSVR/__init__/coef0", "defaultType": "number", "defaultValue": 0.0 }, - "sklearn/sklearn.preprocessing._data/maxabs_scale/copy": { - "target": "sklearn/sklearn.preprocessing._data/maxabs_scale/copy", + "sklearn/sklearn.svm._classes/NuSVR/__init__/shrinking": { + "target": "sklearn/sklearn.svm._classes/NuSVR/__init__/shrinking", "defaultType": "boolean", "defaultValue": true }, - "sklearn/sklearn.preprocessing._data/power_transform/method": { - "target": "sklearn/sklearn.preprocessing._data/power_transform/method", + "sklearn/sklearn.svm._classes/NuSVR/__init__/cache_size": { + "target": "sklearn/sklearn.svm._classes/NuSVR/__init__/cache_size", + "defaultType": "number", + "defaultValue": 200.0 + }, + "sklearn/sklearn.svm._classes/OneClassSVM/__init__/kernel": { + "target": "sklearn/sklearn.svm._classes/OneClassSVM/__init__/kernel", "defaultType": "string", - "defaultValue": "yeo-johnson" + "defaultValue": "rbf" }, - "sklearn/sklearn.preprocessing._data/power_transform/standardize": { - "target": "sklearn/sklearn.preprocessing._data/power_transform/standardize", - "defaultType": "boolean", - "defaultValue": true + "sklearn/sklearn.svm._classes/OneClassSVM/__init__/degree": { + "target": "sklearn/sklearn.svm._classes/OneClassSVM/__init__/degree", + "defaultType": "number", + "defaultValue": 3.0 }, - "sklearn/sklearn.preprocessing._data/power_transform/copy": { - "target": "sklearn/sklearn.preprocessing._data/power_transform/copy", + "sklearn/sklearn.svm._classes/OneClassSVM/__init__/coef0": { + "target": "sklearn/sklearn.svm._classes/OneClassSVM/__init__/coef0", + "defaultType": "number", + "defaultValue": 0.0 + }, + "sklearn/sklearn.svm._classes/OneClassSVM/__init__/tol": { + "target": "sklearn/sklearn.svm._classes/OneClassSVM/__init__/tol", + "defaultType": "number", + "defaultValue": 0.001 + }, + "sklearn/sklearn.svm._classes/OneClassSVM/__init__/shrinking": { + "target": "sklearn/sklearn.svm._classes/OneClassSVM/__init__/shrinking", "defaultType": "boolean", "defaultValue": true 
}, - "sklearn/sklearn.preprocessing._data/quantile_transform/axis": { - "target": "sklearn/sklearn.preprocessing._data/quantile_transform/axis", + "sklearn/sklearn.svm._classes/OneClassSVM/__init__/cache_size": { + "target": "sklearn/sklearn.svm._classes/OneClassSVM/__init__/cache_size", "defaultType": "number", - "defaultValue": 0.0 + "defaultValue": 200.0 }, - "sklearn/sklearn.preprocessing._data/quantile_transform/ignore_implicit_zeros": { - "target": "sklearn/sklearn.preprocessing._data/quantile_transform/ignore_implicit_zeros", + "sklearn/sklearn.svm._classes/OneClassSVM/__init__/verbose": { + "target": "sklearn/sklearn.svm._classes/OneClassSVM/__init__/verbose", "defaultType": "boolean", "defaultValue": false }, - "sklearn/sklearn.preprocessing._data/quantile_transform/copy": { - "target": "sklearn/sklearn.preprocessing._data/quantile_transform/copy", + "sklearn/sklearn.svm._classes/OneClassSVM/__init__/max_iter": { + "target": "sklearn/sklearn.svm._classes/OneClassSVM/__init__/max_iter", + "defaultType": "number", + "defaultValue": -1.0 + }, + "sklearn/sklearn.svm._classes/OneClassSVM/fit/y": { + "target": "sklearn/sklearn.svm._classes/OneClassSVM/fit/y", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.svm._classes/OneClassSVM/fit/sample_weight": { + "target": "sklearn/sklearn.svm._classes/OneClassSVM/fit/sample_weight", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.tree._classes/BaseDecisionTree/apply/check_input": { + "target": "sklearn/sklearn.tree._classes/BaseDecisionTree/apply/check_input", "defaultType": "boolean", "defaultValue": true }, - "sklearn/sklearn.preprocessing._data/robust_scale/axis": { - "target": "sklearn/sklearn.preprocessing._data/robust_scale/axis", - "defaultType": "number", - "defaultValue": 0.0 + "sklearn/sklearn.tree._classes/BaseDecisionTree/cost_complexity_pruning_path/sample_weight": { + "target": "sklearn/sklearn.tree._classes/BaseDecisionTree/cost_complexity_pruning_path/sample_weight", + "defaultType": "none", + "defaultValue": null }, - "sklearn/sklearn.preprocessing._data/robust_scale/with_centering": { - "target": "sklearn/sklearn.preprocessing._data/robust_scale/with_centering", + "sklearn/sklearn.tree._classes/BaseDecisionTree/predict/check_input": { + "target": "sklearn/sklearn.tree._classes/BaseDecisionTree/predict/check_input", "defaultType": "boolean", "defaultValue": true }, - "sklearn/sklearn.preprocessing._data/robust_scale/with_scaling": { - "target": "sklearn/sklearn.preprocessing._data/robust_scale/with_scaling", + "sklearn/sklearn.tree._classes/DecisionTreeClassifier/fit/check_input": { + "target": "sklearn/sklearn.tree._classes/DecisionTreeClassifier/fit/check_input", "defaultType": "boolean", "defaultValue": true }, - "sklearn/sklearn.preprocessing._data/robust_scale/copy": { - "target": "sklearn/sklearn.preprocessing._data/robust_scale/copy", + "sklearn/sklearn.tree._classes/DecisionTreeClassifier/predict_proba/check_input": { + "target": "sklearn/sklearn.tree._classes/DecisionTreeClassifier/predict_proba/check_input", "defaultType": "boolean", "defaultValue": true }, - "sklearn/sklearn.preprocessing._data/robust_scale/unit_variance": { - "target": "sklearn/sklearn.preprocessing._data/robust_scale/unit_variance", - "defaultType": "boolean", - "defaultValue": false + "sklearn/sklearn.tree._classes/DecisionTreeRegressor/fit/sample_weight": { + "target": "sklearn/sklearn.tree._classes/DecisionTreeRegressor/fit/sample_weight", + "defaultType": "none", + "defaultValue": null }, - 
"sklearn/sklearn.preprocessing._data/scale/copy": { - "target": "sklearn/sklearn.preprocessing._data/scale/copy", + "sklearn/sklearn.tree._classes/DecisionTreeRegressor/fit/check_input": { + "target": "sklearn/sklearn.tree._classes/DecisionTreeRegressor/fit/check_input", "defaultType": "boolean", "defaultValue": true }, - "sklearn/sklearn.preprocessing._label/label_binarize/neg_label": { - "target": "sklearn/sklearn.preprocessing._label/label_binarize/neg_label", + "sklearn/sklearn.tree._classes/ExtraTreeClassifier/__init__/splitter": { + "target": "sklearn/sklearn.tree._classes/ExtraTreeClassifier/__init__/splitter", + "defaultType": "string", + "defaultValue": "random" + }, + "sklearn/sklearn.tree._classes/ExtraTreeClassifier/__init__/min_samples_split": { + "target": "sklearn/sklearn.tree._classes/ExtraTreeClassifier/__init__/min_samples_split", "defaultType": "number", - "defaultValue": 0.0 + "defaultValue": 2.0 }, - "sklearn/sklearn.preprocessing._label/label_binarize/pos_label": { - "target": "sklearn/sklearn.preprocessing._label/label_binarize/pos_label", + "sklearn/sklearn.tree._classes/ExtraTreeClassifier/__init__/min_samples_leaf": { + "target": "sklearn/sklearn.tree._classes/ExtraTreeClassifier/__init__/min_samples_leaf", "defaultType": "number", "defaultValue": 1.0 }, - "sklearn/sklearn.preprocessing._label/label_binarize/sparse_output": { - "target": "sklearn/sklearn.preprocessing._label/label_binarize/sparse_output", - "defaultType": "boolean", - "defaultValue": false + "sklearn/sklearn.tree._classes/ExtraTreeClassifier/__init__/min_weight_fraction_leaf": { + "target": "sklearn/sklearn.tree._classes/ExtraTreeClassifier/__init__/min_weight_fraction_leaf", + "defaultType": "number", + "defaultValue": 0.0 }, - "sklearn/sklearn.random_projection/BaseRandomProjection/fit/y": { - "target": "sklearn/sklearn.random_projection/BaseRandomProjection/fit/y", + "sklearn/sklearn.tree._classes/ExtraTreeClassifier/__init__/max_leaf_nodes": { + "target": "sklearn/sklearn.tree._classes/ExtraTreeClassifier/__init__/max_leaf_nodes", "defaultType": "none", "defaultValue": null }, - "sklearn/sklearn.random_projection/GaussianRandomProjection/__init__/compute_inverse_components": { - "target": "sklearn/sklearn.random_projection/GaussianRandomProjection/__init__/compute_inverse_components", - "defaultType": "boolean", - "defaultValue": false + "sklearn/sklearn.tree._classes/ExtraTreeClassifier/__init__/min_impurity_decrease": { + "target": "sklearn/sklearn.tree._classes/ExtraTreeClassifier/__init__/min_impurity_decrease", + "defaultType": "number", + "defaultValue": 0.0 }, - "sklearn/sklearn.random_projection/SparseRandomProjection/__init__/density": { - "target": "sklearn/sklearn.random_projection/SparseRandomProjection/__init__/density", - "defaultType": "string", - "defaultValue": "auto" + "sklearn/sklearn.tree._classes/ExtraTreeClassifier/__init__/class_weight": { + "target": "sklearn/sklearn.tree._classes/ExtraTreeClassifier/__init__/class_weight", + "defaultType": "none", + "defaultValue": null }, - "sklearn/sklearn.random_projection/SparseRandomProjection/__init__/compute_inverse_components": { - "target": "sklearn/sklearn.random_projection/SparseRandomProjection/__init__/compute_inverse_components", - "defaultType": "boolean", - "defaultValue": false + "sklearn/sklearn.tree._classes/ExtraTreeClassifier/__init__/ccp_alpha": { + "target": "sklearn/sklearn.tree._classes/ExtraTreeClassifier/__init__/ccp_alpha", + "defaultType": "number", + "defaultValue": 0.0 + }, + 
"sklearn/sklearn.tree._classes/ExtraTreeRegressor/__init__/max_depth": { + "target": "sklearn/sklearn.tree._classes/ExtraTreeRegressor/__init__/max_depth", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.tree._classes/ExtraTreeRegressor/__init__/min_samples_split": { + "target": "sklearn/sklearn.tree._classes/ExtraTreeRegressor/__init__/min_samples_split", + "defaultType": "number", + "defaultValue": 2.0 + }, + "sklearn/sklearn.tree._classes/ExtraTreeRegressor/__init__/min_samples_leaf": { + "target": "sklearn/sklearn.tree._classes/ExtraTreeRegressor/__init__/min_samples_leaf", + "defaultType": "number", + "defaultValue": 1.0 + }, + "sklearn/sklearn.tree._classes/ExtraTreeRegressor/__init__/min_weight_fraction_leaf": { + "target": "sklearn/sklearn.tree._classes/ExtraTreeRegressor/__init__/min_weight_fraction_leaf", + "defaultType": "number", + "defaultValue": 0.0 + }, + "sklearn/sklearn.tree._classes/ExtraTreeRegressor/__init__/max_features": { + "target": "sklearn/sklearn.tree._classes/ExtraTreeRegressor/__init__/max_features", + "defaultType": "number", + "defaultValue": 1.0 + }, + "sklearn/sklearn.tree._classes/ExtraTreeRegressor/__init__/min_impurity_decrease": { + "target": "sklearn/sklearn.tree._classes/ExtraTreeRegressor/__init__/min_impurity_decrease", + "defaultType": "number", + "defaultValue": 0.0 + }, + "sklearn/sklearn.tree._classes/ExtraTreeRegressor/__init__/max_leaf_nodes": { + "target": "sklearn/sklearn.tree._classes/ExtraTreeRegressor/__init__/max_leaf_nodes", + "defaultType": "none", + "defaultValue": null + }, + "sklearn/sklearn.tree._classes/ExtraTreeRegressor/__init__/ccp_alpha": { + "target": "sklearn/sklearn.tree._classes/ExtraTreeRegressor/__init__/ccp_alpha", + "defaultType": "number", + "defaultValue": 0.0 }, "sklearn/sklearn.tree._export/export_graphviz/label": { "target": "sklearn/sklearn.tree._export/export_graphviz/label", @@ -1630,6 +5035,11 @@ "defaultType": "number", "defaultValue": 3.0 }, + "sklearn/sklearn.utils._testing/ignore_warnings/obj": { + "target": "sklearn/sklearn.utils._testing/ignore_warnings/obj", + "defaultType": "none", + "defaultValue": null + }, "sklearn/sklearn.utils.class_weight/compute_class_weight/class_weight": { "target": "sklearn/sklearn.utils.class_weight/compute_class_weight/class_weight", "defaultType": "string", @@ -1899,63 +5309,33 @@ "sklearn/sklearn.cluster._bisect_k_means/_BisectingTree": { "target": "sklearn/sklearn.cluster._bisect_k_means/_BisectingTree" }, - "sklearn/sklearn.cluster._feature_agglomeration/AgglomerationTransform": { - "target": "sklearn/sklearn.cluster._feature_agglomeration/AgglomerationTransform" - }, - "sklearn/sklearn.cluster._kmeans/_BaseKMeans": { - "target": "sklearn/sklearn.cluster._kmeans/_BaseKMeans" - }, "sklearn/sklearn.covariance._graph_lasso/_DictWithDeprecatedKeys": { "target": "sklearn/sklearn.covariance._graph_lasso/_DictWithDeprecatedKeys" }, "sklearn/sklearn.cross_decomposition._pls/PLSCanonical": { "target": "sklearn/sklearn.cross_decomposition._pls/PLSCanonical" }, - "sklearn/sklearn.cross_decomposition._pls/_PLS": { - "target": "sklearn/sklearn.cross_decomposition._pls/_PLS" - }, "sklearn/sklearn.datasets._openml/OpenMLError": { "target": "sklearn/sklearn.datasets._openml/OpenMLError" }, - "sklearn/sklearn.decomposition._base/_BasePCA": { - "target": "sklearn/sklearn.decomposition._base/_BasePCA" - }, "sklearn/sklearn.decomposition._dict_learning/DictionaryLearning": { "target": "sklearn/sklearn.decomposition._dict_learning/DictionaryLearning" }, 
"sklearn/sklearn.decomposition._dict_learning/SparseCoder": { "target": "sklearn/sklearn.decomposition._dict_learning/SparseCoder" }, - "sklearn/sklearn.decomposition._dict_learning/_BaseSparseCoding": { - "target": "sklearn/sklearn.decomposition._dict_learning/_BaseSparseCoding" - }, "sklearn/sklearn.decomposition._nmf/MiniBatchNMF": { "target": "sklearn/sklearn.decomposition._nmf/MiniBatchNMF" }, "sklearn/sklearn.decomposition._sparse_pca/MiniBatchSparsePCA": { "target": "sklearn/sklearn.decomposition._sparse_pca/MiniBatchSparsePCA" }, - "sklearn/sklearn.ensemble._bagging/BaseBagging": { - "target": "sklearn/sklearn.ensemble._bagging/BaseBagging" - }, "sklearn/sklearn.ensemble._base/BaseEnsemble": { "target": "sklearn/sklearn.ensemble._base/BaseEnsemble" }, "sklearn/sklearn.ensemble._base/_BaseHeterogeneousEnsemble": { "target": "sklearn/sklearn.ensemble._base/_BaseHeterogeneousEnsemble" }, - "sklearn/sklearn.ensemble._forest/BaseForest": { - "target": "sklearn/sklearn.ensemble._forest/BaseForest" - }, - "sklearn/sklearn.ensemble._forest/ForestClassifier": { - "target": "sklearn/sklearn.ensemble._forest/ForestClassifier" - }, - "sklearn/sklearn.ensemble._forest/ForestRegressor": { - "target": "sklearn/sklearn.ensemble._forest/ForestRegressor" - }, - "sklearn/sklearn.ensemble._gb/BaseGradientBoosting": { - "target": "sklearn/sklearn.ensemble._gb/BaseGradientBoosting" - }, "sklearn/sklearn.ensemble._gb/VerboseReporter": { "target": "sklearn/sklearn.ensemble._gb/VerboseReporter" }, @@ -1992,9 +5372,6 @@ "sklearn/sklearn.ensemble._hist_gradient_boosting.binning/_BinMapper": { "target": "sklearn/sklearn.ensemble._hist_gradient_boosting.binning/_BinMapper" }, - "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/BaseHistGradientBoosting": { - "target": "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/BaseHistGradientBoosting" - }, "sklearn/sklearn.ensemble._hist_gradient_boosting.grower/TreeGrower": { "target": "sklearn/sklearn.ensemble._hist_gradient_boosting.grower/TreeGrower" }, @@ -2004,12 +5381,6 @@ "sklearn/sklearn.ensemble._hist_gradient_boosting.predictor/TreePredictor": { "target": "sklearn/sklearn.ensemble._hist_gradient_boosting.predictor/TreePredictor" }, - "sklearn/sklearn.ensemble._stacking/_BaseStacking": { - "target": "sklearn/sklearn.ensemble._stacking/_BaseStacking" - }, - "sklearn/sklearn.ensemble._voting/_BaseVoting": { - "target": "sklearn/sklearn.ensemble._voting/_BaseVoting" - }, "sklearn/sklearn.ensemble._weight_boosting/BaseWeightBoosting": { "target": "sklearn/sklearn.ensemble._weight_boosting/BaseWeightBoosting" }, @@ -2127,12 +5498,6 @@ "sklearn/sklearn.externals._packaging.version/_BaseVersion": { "target": "sklearn/sklearn.externals._packaging.version/_BaseVersion" }, - "sklearn/sklearn.feature_extraction.text/_VectorizerMixin": { - "target": "sklearn/sklearn.feature_extraction.text/_VectorizerMixin" - }, - "sklearn/sklearn.feature_selection._univariate_selection/_BaseFilter": { - "target": "sklearn/sklearn.feature_selection._univariate_selection/_BaseFilter" - }, "sklearn/sklearn.gaussian_process._gpc/_BinaryGaussianProcessClassifierLaplace": { "target": "sklearn/sklearn.gaussian_process._gpc/_BinaryGaussianProcessClassifierLaplace" }, @@ -2190,18 +5555,6 @@ "sklearn/sklearn.kernel_approximation/SkewedChi2Sampler": { "target": "sklearn/sklearn.kernel_approximation/SkewedChi2Sampler" }, - "sklearn/sklearn.linear_model._base/LinearClassifierMixin": { - "target": "sklearn/sklearn.linear_model._base/LinearClassifierMixin" - }, - 
"sklearn/sklearn.linear_model._base/LinearModel": { - "target": "sklearn/sklearn.linear_model._base/LinearModel" - }, - "sklearn/sklearn.linear_model._base/SparseCoefMixin": { - "target": "sklearn/sklearn.linear_model._base/SparseCoefMixin" - }, - "sklearn/sklearn.linear_model._coordinate_descent/LinearModelCV": { - "target": "sklearn/sklearn.linear_model._coordinate_descent/LinearModelCV" - }, "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskLasso": { "target": "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskLasso" }, @@ -2223,18 +5576,12 @@ "sklearn/sklearn.linear_model._ridge/_BaseRidge": { "target": "sklearn/sklearn.linear_model._ridge/_BaseRidge" }, - "sklearn/sklearn.linear_model._ridge/_BaseRidgeCV": { - "target": "sklearn/sklearn.linear_model._ridge/_BaseRidgeCV" - }, "sklearn/sklearn.linear_model._ridge/_IdentityClassifier": { "target": "sklearn/sklearn.linear_model._ridge/_IdentityClassifier" }, "sklearn/sklearn.linear_model._ridge/_IdentityRegressor": { "target": "sklearn/sklearn.linear_model._ridge/_IdentityRegressor" }, - "sklearn/sklearn.linear_model._ridge/_RidgeClassifierMixin": { - "target": "sklearn/sklearn.linear_model._ridge/_RidgeClassifierMixin" - }, "sklearn/sklearn.linear_model._ridge/_RidgeGCV": { "target": "sklearn/sklearn.linear_model._ridge/_RidgeGCV" }, @@ -2247,12 +5594,6 @@ "sklearn/sklearn.linear_model._stochastic_gradient/BaseSGD": { "target": "sklearn/sklearn.linear_model._stochastic_gradient/BaseSGD" }, - "sklearn/sklearn.linear_model._stochastic_gradient/BaseSGDClassifier": { - "target": "sklearn/sklearn.linear_model._stochastic_gradient/BaseSGDClassifier" - }, - "sklearn/sklearn.linear_model._stochastic_gradient/BaseSGDRegressor": { - "target": "sklearn/sklearn.linear_model._stochastic_gradient/BaseSGDRegressor" - }, "sklearn/sklearn.linear_model._stochastic_gradient/SGDOneClassSVM": { "target": "sklearn/sklearn.linear_model._stochastic_gradient/SGDOneClassSVM" }, @@ -2280,18 +5621,9 @@ "sklearn/sklearn.metrics._scorer/_ThresholdScorer": { "target": "sklearn/sklearn.metrics._scorer/_ThresholdScorer" }, - "sklearn/sklearn.mixture._base/BaseMixture": { - "target": "sklearn/sklearn.mixture._base/BaseMixture" - }, - "sklearn/sklearn.model_selection._search/BaseSearchCV": { - "target": "sklearn/sklearn.model_selection._search/BaseSearchCV" - }, "sklearn/sklearn.model_selection._search/ParameterSampler": { "target": "sklearn/sklearn.model_selection._search/ParameterSampler" }, - "sklearn/sklearn.model_selection._search_successive_halving/BaseSuccessiveHalving": { - "target": "sklearn/sklearn.model_selection._search_successive_halving/BaseSuccessiveHalving" - }, "sklearn/sklearn.model_selection._search_successive_halving/HalvingGridSearchCV": { "target": "sklearn/sklearn.model_selection._search_successive_halving/HalvingGridSearchCV" }, @@ -2307,15 +5639,9 @@ "sklearn/sklearn.model_selection._split/StratifiedGroupKFold": { "target": "sklearn/sklearn.model_selection._split/StratifiedGroupKFold" }, - "sklearn/sklearn.model_selection._split/_BaseKFold": { - "target": "sklearn/sklearn.model_selection._split/_BaseKFold" - }, "sklearn/sklearn.model_selection._split/_CVIterableWrapper": { "target": "sklearn/sklearn.model_selection._split/_CVIterableWrapper" }, - "sklearn/sklearn.model_selection._split/_RepeatedSplits": { - "target": "sklearn/sklearn.model_selection._split/_RepeatedSplits" - }, "sklearn/sklearn.multiclass/OutputCodeClassifier": { "target": "sklearn/sklearn.multiclass/OutputCodeClassifier" }, @@ -2325,36 +5651,15 @@ 
"sklearn/sklearn.multioutput/ClassifierChain": { "target": "sklearn/sklearn.multioutput/ClassifierChain" }, - "sklearn/sklearn.multioutput/_BaseChain": { - "target": "sklearn/sklearn.multioutput/_BaseChain" - }, - "sklearn/sklearn.multioutput/_MultiOutputEstimator": { - "target": "sklearn/sklearn.multioutput/_MultiOutputEstimator" - }, - "sklearn/sklearn.naive_bayes/_BaseDiscreteNB": { - "target": "sklearn/sklearn.naive_bayes/_BaseDiscreteNB" - }, - "sklearn/sklearn.naive_bayes/_BaseNB": { - "target": "sklearn/sklearn.naive_bayes/_BaseNB" - }, - "sklearn/sklearn.neighbors._base/KNeighborsMixin": { - "target": "sklearn/sklearn.neighbors._base/KNeighborsMixin" - }, "sklearn/sklearn.neighbors._base/NeighborsBase": { "target": "sklearn/sklearn.neighbors._base/NeighborsBase" }, - "sklearn/sklearn.neighbors._base/RadiusNeighborsMixin": { - "target": "sklearn/sklearn.neighbors._base/RadiusNeighborsMixin" - }, "sklearn/sklearn.neighbors._graph/KNeighborsTransformer": { "target": "sklearn/sklearn.neighbors._graph/KNeighborsTransformer" }, "sklearn/sklearn.neighbors._graph/RadiusNeighborsTransformer": { "target": "sklearn/sklearn.neighbors._graph/RadiusNeighborsTransformer" }, - "sklearn/sklearn.neural_network._multilayer_perceptron/BaseMultilayerPerceptron": { - "target": "sklearn/sklearn.neural_network._multilayer_perceptron/BaseMultilayerPerceptron" - }, "sklearn/sklearn.neural_network._stochastic_optimizers/AdamOptimizer": { "target": "sklearn/sklearn.neural_network._stochastic_optimizers/AdamOptimizer" }, @@ -2370,18 +5675,9 @@ "sklearn/sklearn.preprocessing._polynomial/SplineTransformer": { "target": "sklearn/sklearn.preprocessing._polynomial/SplineTransformer" }, - "sklearn/sklearn.semi_supervised._label_propagation/BaseLabelPropagation": { - "target": "sklearn/sklearn.semi_supervised._label_propagation/BaseLabelPropagation" - }, "sklearn/sklearn.semi_supervised._self_training/SelfTrainingClassifier": { "target": "sklearn/sklearn.semi_supervised._self_training/SelfTrainingClassifier" }, - "sklearn/sklearn.svm._base/BaseLibSVM": { - "target": "sklearn/sklearn.svm._base/BaseLibSVM" - }, - "sklearn/sklearn.svm._base/BaseSVC": { - "target": "sklearn/sklearn.svm._base/BaseSVC" - }, "sklearn/sklearn.tree._export/Sentinel": { "target": "sklearn/sklearn.tree._export/Sentinel" }, @@ -2430,33 +5726,6 @@ "sklearn/sklearn.utils._mocking/NoSampleWeightWrapper": { "target": "sklearn/sklearn.utils._mocking/NoSampleWeightWrapper" }, - "sklearn/sklearn.utils._param_validation/Interval": { - "target": "sklearn/sklearn.utils._param_validation/Interval" - }, - "sklearn/sklearn.utils._param_validation/StrOptions": { - "target": "sklearn/sklearn.utils._param_validation/StrOptions" - }, - "sklearn/sklearn.utils._param_validation/_ArrayLikes": { - "target": "sklearn/sklearn.utils._param_validation/_ArrayLikes" - }, - "sklearn/sklearn.utils._param_validation/_Callables": { - "target": "sklearn/sklearn.utils._param_validation/_Callables" - }, - "sklearn/sklearn.utils._param_validation/_Constraint": { - "target": "sklearn/sklearn.utils._param_validation/_Constraint" - }, - "sklearn/sklearn.utils._param_validation/_InstancesOf": { - "target": "sklearn/sklearn.utils._param_validation/_InstancesOf" - }, - "sklearn/sklearn.utils._param_validation/_NoneConstraint": { - "target": "sklearn/sklearn.utils._param_validation/_NoneConstraint" - }, - "sklearn/sklearn.utils._param_validation/_RandomStates": { - "target": "sklearn/sklearn.utils._param_validation/_RandomStates" - }, - 
"sklearn/sklearn.utils._param_validation/_SparseMatrices": { - "target": "sklearn/sklearn.utils._param_validation/_SparseMatrices" - }, "sklearn/sklearn.utils._pprint/KeyValTuple": { "target": "sklearn/sklearn.utils._pprint/KeyValTuple" }, @@ -2499,9 +5768,6 @@ "sklearn/sklearn.utils.metaestimators/_AvailableIfDescriptor": { "target": "sklearn/sklearn.utils.metaestimators/_AvailableIfDescriptor" }, - "sklearn/sklearn.utils.metaestimators/_BaseComposition": { - "target": "sklearn/sklearn.utils.metaestimators/_BaseComposition" - }, "sklearn/sklearn.utils.metaestimators/_IffHasAttrDescriptor": { "target": "sklearn/sklearn.utils.metaestimators/_IffHasAttrDescriptor" }, @@ -2763,9 +6029,6 @@ "sklearn/sklearn.base/BaseEstimator/_validate_data": { "target": "sklearn/sklearn.base/BaseEstimator/_validate_data" }, - "sklearn/sklearn.base/BaseEstimator/_validate_params": { - "target": "sklearn/sklearn.base/BaseEstimator/_validate_params" - }, "sklearn/sklearn.base/BiclusterMixin/biclusters_@getter": { "target": "sklearn/sklearn.base/BiclusterMixin/biclusters_@getter" }, @@ -2853,42 +6116,18 @@ "sklearn/sklearn.calibration/_sigmoid_calibration": { "target": "sklearn/sklearn.calibration/_sigmoid_calibration" }, - "sklearn/sklearn.cluster._affinity_propagation/AffinityPropagation/__init__": { - "target": "sklearn/sklearn.cluster._affinity_propagation/AffinityPropagation/__init__" - }, "sklearn/sklearn.cluster._affinity_propagation/AffinityPropagation/_more_tags": { "target": "sklearn/sklearn.cluster._affinity_propagation/AffinityPropagation/_more_tags" }, - "sklearn/sklearn.cluster._affinity_propagation/AffinityPropagation/fit": { - "target": "sklearn/sklearn.cluster._affinity_propagation/AffinityPropagation/fit" - }, "sklearn/sklearn.cluster._affinity_propagation/AffinityPropagation/fit_predict": { "target": "sklearn/sklearn.cluster._affinity_propagation/AffinityPropagation/fit_predict" }, - "sklearn/sklearn.cluster._affinity_propagation/AffinityPropagation/predict": { - "target": "sklearn/sklearn.cluster._affinity_propagation/AffinityPropagation/predict" - }, "sklearn/sklearn.cluster._affinity_propagation/_equal_similarities_and_preferences": { "target": "sklearn/sklearn.cluster._affinity_propagation/_equal_similarities_and_preferences" }, - "sklearn/sklearn.cluster._agglomerative/AgglomerativeClustering/__init__": { - "target": "sklearn/sklearn.cluster._agglomerative/AgglomerativeClustering/__init__" - }, "sklearn/sklearn.cluster._agglomerative/AgglomerativeClustering/_fit": { "target": "sklearn/sklearn.cluster._agglomerative/AgglomerativeClustering/_fit" }, - "sklearn/sklearn.cluster._agglomerative/AgglomerativeClustering/fit": { - "target": "sklearn/sklearn.cluster._agglomerative/AgglomerativeClustering/fit" - }, - "sklearn/sklearn.cluster._agglomerative/AgglomerativeClustering/fit_predict": { - "target": "sklearn/sklearn.cluster._agglomerative/AgglomerativeClustering/fit_predict" - }, - "sklearn/sklearn.cluster._agglomerative/FeatureAgglomeration/__init__": { - "target": "sklearn/sklearn.cluster._agglomerative/FeatureAgglomeration/__init__" - }, - "sklearn/sklearn.cluster._agglomerative/FeatureAgglomeration/fit": { - "target": "sklearn/sklearn.cluster._agglomerative/FeatureAgglomeration/fit" - }, "sklearn/sklearn.cluster._agglomerative/FeatureAgglomeration/fit_predict@getter": { "target": "sklearn/sklearn.cluster._agglomerative/FeatureAgglomeration/fit_predict@getter" }, @@ -2967,9 +6206,6 @@ "sklearn/sklearn.cluster._bicluster/_scale_normalize": { "target": 
"sklearn/sklearn.cluster._bicluster/_scale_normalize" }, - "sklearn/sklearn.cluster._birch/Birch/__init__": { - "target": "sklearn/sklearn.cluster._birch/Birch/__init__" - }, "sklearn/sklearn.cluster._birch/Birch/_check_fit": { "target": "sklearn/sklearn.cluster._birch/Birch/_check_fit" }, @@ -2982,15 +6218,9 @@ "sklearn/sklearn.cluster._birch/Birch/_global_clustering": { "target": "sklearn/sklearn.cluster._birch/Birch/_global_clustering" }, - "sklearn/sklearn.cluster._birch/Birch/_more_tags": { - "target": "sklearn/sklearn.cluster._birch/Birch/_more_tags" - }, "sklearn/sklearn.cluster._birch/Birch/_predict": { "target": "sklearn/sklearn.cluster._birch/Birch/_predict" }, - "sklearn/sklearn.cluster._birch/Birch/fit": { - "target": "sklearn/sklearn.cluster._birch/Birch/fit" - }, "sklearn/sklearn.cluster._birch/Birch/fit_@getter": { "target": "sklearn/sklearn.cluster._birch/Birch/fit_@getter" }, @@ -3000,9 +6230,6 @@ "sklearn/sklearn.cluster._birch/Birch/partial_fit_@getter": { "target": "sklearn/sklearn.cluster._birch/Birch/partial_fit_@getter" }, - "sklearn/sklearn.cluster._birch/Birch/predict": { - "target": "sklearn/sklearn.cluster._birch/Birch/predict" - }, "sklearn/sklearn.cluster._birch/Birch/transform": { "target": "sklearn/sklearn.cluster._birch/Birch/transform" }, @@ -3042,6 +6269,9 @@ "sklearn/sklearn.cluster._bisect_k_means/BisectingKMeans/_bisect": { "target": "sklearn/sklearn.cluster._bisect_k_means/BisectingKMeans/_bisect" }, + "sklearn/sklearn.cluster._bisect_k_means/BisectingKMeans/_check_params": { + "target": "sklearn/sklearn.cluster._bisect_k_means/BisectingKMeans/_check_params" + }, "sklearn/sklearn.cluster._bisect_k_means/BisectingKMeans/_inertia_per_cluster": { "target": "sklearn/sklearn.cluster._bisect_k_means/BisectingKMeans/_inertia_per_cluster" }, @@ -3072,41 +6302,17 @@ "sklearn/sklearn.cluster._bisect_k_means/_BisectingTree/split": { "target": "sklearn/sklearn.cluster._bisect_k_means/_BisectingTree/split" }, - "sklearn/sklearn.cluster._dbscan/DBSCAN/__init__": { - "target": "sklearn/sklearn.cluster._dbscan/DBSCAN/__init__" - }, "sklearn/sklearn.cluster._dbscan/DBSCAN/_more_tags": { "target": "sklearn/sklearn.cluster._dbscan/DBSCAN/_more_tags" }, - "sklearn/sklearn.cluster._dbscan/DBSCAN/fit": { - "target": "sklearn/sklearn.cluster._dbscan/DBSCAN/fit" - }, - "sklearn/sklearn.cluster._dbscan/DBSCAN/fit_predict": { - "target": "sklearn/sklearn.cluster._dbscan/DBSCAN/fit_predict" - }, - "sklearn/sklearn.cluster._feature_agglomeration/AgglomerationTransform/inverse_transform": { - "target": "sklearn/sklearn.cluster._feature_agglomeration/AgglomerationTransform/inverse_transform" - }, - "sklearn/sklearn.cluster._feature_agglomeration/AgglomerationTransform/transform": { - "target": "sklearn/sklearn.cluster._feature_agglomeration/AgglomerationTransform/transform" - }, - "sklearn/sklearn.cluster._kmeans/KMeans/__init__": { - "target": "sklearn/sklearn.cluster._kmeans/KMeans/__init__" - }, - "sklearn/sklearn.cluster._kmeans/KMeans/_check_params_vs_input": { - "target": "sklearn/sklearn.cluster._kmeans/KMeans/_check_params_vs_input" + "sklearn/sklearn.cluster._kmeans/KMeans/_check_params": { + "target": "sklearn/sklearn.cluster._kmeans/KMeans/_check_params" }, "sklearn/sklearn.cluster._kmeans/KMeans/_warn_mkl_vcomp": { "target": "sklearn/sklearn.cluster._kmeans/KMeans/_warn_mkl_vcomp" }, - "sklearn/sklearn.cluster._kmeans/KMeans/fit": { - "target": "sklearn/sklearn.cluster._kmeans/KMeans/fit" - }, - "sklearn/sklearn.cluster._kmeans/MiniBatchKMeans/__init__": { - "target": 
"sklearn/sklearn.cluster._kmeans/MiniBatchKMeans/__init__" - }, - "sklearn/sklearn.cluster._kmeans/MiniBatchKMeans/_check_params_vs_input": { - "target": "sklearn/sklearn.cluster._kmeans/MiniBatchKMeans/_check_params_vs_input" + "sklearn/sklearn.cluster._kmeans/MiniBatchKMeans/_check_params": { + "target": "sklearn/sklearn.cluster._kmeans/MiniBatchKMeans/_check_params" }, "sklearn/sklearn.cluster._kmeans/MiniBatchKMeans/_mini_batch_convergence": { "target": "sklearn/sklearn.cluster._kmeans/MiniBatchKMeans/_mini_batch_convergence" @@ -3117,20 +6323,14 @@ "sklearn/sklearn.cluster._kmeans/MiniBatchKMeans/_warn_mkl_vcomp": { "target": "sklearn/sklearn.cluster._kmeans/MiniBatchKMeans/_warn_mkl_vcomp" }, - "sklearn/sklearn.cluster._kmeans/MiniBatchKMeans/fit": { - "target": "sklearn/sklearn.cluster._kmeans/MiniBatchKMeans/fit" - }, - "sklearn/sklearn.cluster._kmeans/MiniBatchKMeans/partial_fit": { - "target": "sklearn/sklearn.cluster._kmeans/MiniBatchKMeans/partial_fit" - }, "sklearn/sklearn.cluster._kmeans/_BaseKMeans/__init__": { "target": "sklearn/sklearn.cluster._kmeans/_BaseKMeans/__init__" }, "sklearn/sklearn.cluster._kmeans/_BaseKMeans/_check_mkl_vcomp": { "target": "sklearn/sklearn.cluster._kmeans/_BaseKMeans/_check_mkl_vcomp" }, - "sklearn/sklearn.cluster._kmeans/_BaseKMeans/_check_params_vs_input": { - "target": "sklearn/sklearn.cluster._kmeans/_BaseKMeans/_check_params_vs_input" + "sklearn/sklearn.cluster._kmeans/_BaseKMeans/_check_params": { + "target": "sklearn/sklearn.cluster._kmeans/_BaseKMeans/_check_params" }, "sklearn/sklearn.cluster._kmeans/_BaseKMeans/_check_test_data": { "target": "sklearn/sklearn.cluster._kmeans/_BaseKMeans/_check_test_data" @@ -3150,21 +6350,6 @@ "sklearn/sklearn.cluster._kmeans/_BaseKMeans/_warn_mkl_vcomp": { "target": "sklearn/sklearn.cluster._kmeans/_BaseKMeans/_warn_mkl_vcomp" }, - "sklearn/sklearn.cluster._kmeans/_BaseKMeans/fit_predict": { - "target": "sklearn/sklearn.cluster._kmeans/_BaseKMeans/fit_predict" - }, - "sklearn/sklearn.cluster._kmeans/_BaseKMeans/fit_transform": { - "target": "sklearn/sklearn.cluster._kmeans/_BaseKMeans/fit_transform" - }, - "sklearn/sklearn.cluster._kmeans/_BaseKMeans/predict": { - "target": "sklearn/sklearn.cluster._kmeans/_BaseKMeans/predict" - }, - "sklearn/sklearn.cluster._kmeans/_BaseKMeans/score": { - "target": "sklearn/sklearn.cluster._kmeans/_BaseKMeans/score" - }, - "sklearn/sklearn.cluster._kmeans/_BaseKMeans/transform": { - "target": "sklearn/sklearn.cluster._kmeans/_BaseKMeans/transform" - }, "sklearn/sklearn.cluster._kmeans/_kmeans_plusplus": { "target": "sklearn/sklearn.cluster._kmeans/_kmeans_plusplus" }, @@ -3189,15 +6374,6 @@ "sklearn/sklearn.cluster._kmeans/kmeans_plusplus": { "target": "sklearn/sklearn.cluster._kmeans/kmeans_plusplus" }, - "sklearn/sklearn.cluster._mean_shift/MeanShift/__init__": { - "target": "sklearn/sklearn.cluster._mean_shift/MeanShift/__init__" - }, - "sklearn/sklearn.cluster._mean_shift/MeanShift/fit": { - "target": "sklearn/sklearn.cluster._mean_shift/MeanShift/fit" - }, - "sklearn/sklearn.cluster._mean_shift/MeanShift/predict": { - "target": "sklearn/sklearn.cluster._mean_shift/MeanShift/predict" - }, "sklearn/sklearn.cluster._mean_shift/_mean_shift_single_seed": { "target": "sklearn/sklearn.cluster._mean_shift/_mean_shift_single_seed" }, @@ -3207,12 +6383,6 @@ "sklearn/sklearn.cluster._mean_shift/mean_shift": { "target": "sklearn/sklearn.cluster._mean_shift/mean_shift" }, - "sklearn/sklearn.cluster._optics/OPTICS/__init__": { - "target": 
"sklearn/sklearn.cluster._optics/OPTICS/__init__" - }, - "sklearn/sklearn.cluster._optics/OPTICS/fit": { - "target": "sklearn/sklearn.cluster._optics/OPTICS/fit" - }, "sklearn/sklearn.cluster._optics/_compute_core_distances_": { "target": "sklearn/sklearn.cluster._optics/_compute_core_distances_" }, @@ -3246,18 +6416,9 @@ "sklearn/sklearn.cluster._optics/compute_optics_graph": { "target": "sklearn/sklearn.cluster._optics/compute_optics_graph" }, - "sklearn/sklearn.cluster._spectral/SpectralClustering/__init__": { - "target": "sklearn/sklearn.cluster._spectral/SpectralClustering/__init__" - }, "sklearn/sklearn.cluster._spectral/SpectralClustering/_more_tags": { "target": "sklearn/sklearn.cluster._spectral/SpectralClustering/_more_tags" }, - "sklearn/sklearn.cluster._spectral/SpectralClustering/fit": { - "target": "sklearn/sklearn.cluster._spectral/SpectralClustering/fit" - }, - "sklearn/sklearn.cluster._spectral/SpectralClustering/fit_predict": { - "target": "sklearn/sklearn.cluster._spectral/SpectralClustering/fit_predict" - }, "sklearn/sklearn.cluster._spectral/cluster_qr": { "target": "sklearn/sklearn.cluster._spectral/cluster_qr" }, @@ -3270,18 +6431,12 @@ "sklearn/sklearn.cluster.setup/configuration": { "target": "sklearn/sklearn.cluster.setup/configuration" }, - "sklearn/sklearn.compose._column_transformer/ColumnTransformer/__init__": { - "target": "sklearn/sklearn.compose._column_transformer/ColumnTransformer/__init__" - }, "sklearn/sklearn.compose._column_transformer/ColumnTransformer/_fit_transform": { "target": "sklearn/sklearn.compose._column_transformer/ColumnTransformer/_fit_transform" }, "sklearn/sklearn.compose._column_transformer/ColumnTransformer/_get_feature_name_out_for_transformer": { "target": "sklearn/sklearn.compose._column_transformer/ColumnTransformer/_get_feature_name_out_for_transformer" }, - "sklearn/sklearn.compose._column_transformer/ColumnTransformer/_hstack": { - "target": "sklearn/sklearn.compose._column_transformer/ColumnTransformer/_hstack" - }, "sklearn/sklearn.compose._column_transformer/ColumnTransformer/_iter": { "target": "sklearn/sklearn.compose._column_transformer/ColumnTransformer/_iter" }, @@ -3315,30 +6470,12 @@ "sklearn/sklearn.compose._column_transformer/ColumnTransformer/_validate_transformers": { "target": "sklearn/sklearn.compose._column_transformer/ColumnTransformer/_validate_transformers" }, - "sklearn/sklearn.compose._column_transformer/ColumnTransformer/fit": { - "target": "sklearn/sklearn.compose._column_transformer/ColumnTransformer/fit" - }, - "sklearn/sklearn.compose._column_transformer/ColumnTransformer/fit_transform": { - "target": "sklearn/sklearn.compose._column_transformer/ColumnTransformer/fit_transform" - }, - "sklearn/sklearn.compose._column_transformer/ColumnTransformer/get_feature_names": { - "target": "sklearn/sklearn.compose._column_transformer/ColumnTransformer/get_feature_names" - }, "sklearn/sklearn.compose._column_transformer/ColumnTransformer/get_feature_names_out": { "target": "sklearn/sklearn.compose._column_transformer/ColumnTransformer/get_feature_names_out" }, - "sklearn/sklearn.compose._column_transformer/ColumnTransformer/get_params": { - "target": "sklearn/sklearn.compose._column_transformer/ColumnTransformer/get_params" - }, "sklearn/sklearn.compose._column_transformer/ColumnTransformer/named_transformers_@getter": { "target": "sklearn/sklearn.compose._column_transformer/ColumnTransformer/named_transformers_@getter" }, - "sklearn/sklearn.compose._column_transformer/ColumnTransformer/set_params": { - 
"target": "sklearn/sklearn.compose._column_transformer/ColumnTransformer/set_params" - }, - "sklearn/sklearn.compose._column_transformer/ColumnTransformer/transform": { - "target": "sklearn/sklearn.compose._column_transformer/ColumnTransformer/transform" - }, "sklearn/sklearn.compose._column_transformer/_check_X": { "target": "sklearn/sklearn.compose._column_transformer/_check_X" }, @@ -3351,27 +6488,15 @@ "sklearn/sklearn.compose._column_transformer/make_column_selector/__call__": { "target": "sklearn/sklearn.compose._column_transformer/make_column_selector/__call__" }, - "sklearn/sklearn.compose._column_transformer/make_column_selector/__init__": { - "target": "sklearn/sklearn.compose._column_transformer/make_column_selector/__init__" - }, - "sklearn/sklearn.compose._target/TransformedTargetRegressor/__init__": { - "target": "sklearn/sklearn.compose._target/TransformedTargetRegressor/__init__" - }, "sklearn/sklearn.compose._target/TransformedTargetRegressor/_fit_transformer": { "target": "sklearn/sklearn.compose._target/TransformedTargetRegressor/_fit_transformer" }, "sklearn/sklearn.compose._target/TransformedTargetRegressor/_more_tags": { "target": "sklearn/sklearn.compose._target/TransformedTargetRegressor/_more_tags" }, - "sklearn/sklearn.compose._target/TransformedTargetRegressor/fit": { - "target": "sklearn/sklearn.compose._target/TransformedTargetRegressor/fit" - }, "sklearn/sklearn.compose._target/TransformedTargetRegressor/n_features_in_@getter": { "target": "sklearn/sklearn.compose._target/TransformedTargetRegressor/n_features_in_@getter" }, - "sklearn/sklearn.compose._target/TransformedTargetRegressor/predict": { - "target": "sklearn/sklearn.compose._target/TransformedTargetRegressor/predict" - }, "sklearn/sklearn.conftest/_fetch_fixture": { "target": "sklearn/sklearn.conftest/_fetch_fixture" }, @@ -3390,42 +6515,21 @@ "sklearn/sklearn.conftest/pytest_runtest_setup": { "target": "sklearn/sklearn.conftest/pytest_runtest_setup" }, - "sklearn/sklearn.covariance._elliptic_envelope/EllipticEnvelope/__init__": { - "target": "sklearn/sklearn.covariance._elliptic_envelope/EllipticEnvelope/__init__" - }, - "sklearn/sklearn.covariance._elliptic_envelope/EllipticEnvelope/decision_function": { - "target": "sklearn/sklearn.covariance._elliptic_envelope/EllipticEnvelope/decision_function" - }, - "sklearn/sklearn.covariance._elliptic_envelope/EllipticEnvelope/fit": { - "target": "sklearn/sklearn.covariance._elliptic_envelope/EllipticEnvelope/fit" - }, - "sklearn/sklearn.covariance._elliptic_envelope/EllipticEnvelope/predict": { - "target": "sklearn/sklearn.covariance._elliptic_envelope/EllipticEnvelope/predict" - }, "sklearn/sklearn.covariance._elliptic_envelope/EllipticEnvelope/score": { "target": "sklearn/sklearn.covariance._elliptic_envelope/EllipticEnvelope/score" }, "sklearn/sklearn.covariance._elliptic_envelope/EllipticEnvelope/score_samples": { "target": "sklearn/sklearn.covariance._elliptic_envelope/EllipticEnvelope/score_samples" }, - "sklearn/sklearn.covariance._empirical_covariance/EmpiricalCovariance/__init__": { - "target": "sklearn/sklearn.covariance._empirical_covariance/EmpiricalCovariance/__init__" - }, "sklearn/sklearn.covariance._empirical_covariance/EmpiricalCovariance/_set_covariance": { "target": "sklearn/sklearn.covariance._empirical_covariance/EmpiricalCovariance/_set_covariance" }, "sklearn/sklearn.covariance._empirical_covariance/EmpiricalCovariance/error_norm": { "target": "sklearn/sklearn.covariance._empirical_covariance/EmpiricalCovariance/error_norm" }, - 
"sklearn/sklearn.covariance._empirical_covariance/EmpiricalCovariance/fit": { - "target": "sklearn/sklearn.covariance._empirical_covariance/EmpiricalCovariance/fit" - }, "sklearn/sklearn.covariance._empirical_covariance/EmpiricalCovariance/get_precision": { "target": "sklearn/sklearn.covariance._empirical_covariance/EmpiricalCovariance/get_precision" }, - "sklearn/sklearn.covariance._empirical_covariance/EmpiricalCovariance/mahalanobis": { - "target": "sklearn/sklearn.covariance._empirical_covariance/EmpiricalCovariance/mahalanobis" - }, "sklearn/sklearn.covariance._empirical_covariance/EmpiricalCovariance/score": { "target": "sklearn/sklearn.covariance._empirical_covariance/EmpiricalCovariance/score" }, @@ -3435,15 +6539,6 @@ "sklearn/sklearn.covariance._empirical_covariance/log_likelihood": { "target": "sklearn/sklearn.covariance._empirical_covariance/log_likelihood" }, - "sklearn/sklearn.covariance._graph_lasso/GraphicalLasso/__init__": { - "target": "sklearn/sklearn.covariance._graph_lasso/GraphicalLasso/__init__" - }, - "sklearn/sklearn.covariance._graph_lasso/GraphicalLasso/fit": { - "target": "sklearn/sklearn.covariance._graph_lasso/GraphicalLasso/fit" - }, - "sklearn/sklearn.covariance._graph_lasso/GraphicalLassoCV/__init__": { - "target": "sklearn/sklearn.covariance._graph_lasso/GraphicalLassoCV/__init__" - }, "sklearn/sklearn.covariance._graph_lasso/GraphicalLassoCV/fit": { "target": "sklearn/sklearn.covariance._graph_lasso/GraphicalLassoCV/fit" }, @@ -3471,15 +6566,9 @@ "sklearn/sklearn.covariance._graph_lasso/graphical_lasso_path": { "target": "sklearn/sklearn.covariance._graph_lasso/graphical_lasso_path" }, - "sklearn/sklearn.covariance._robust_covariance/MinCovDet/__init__": { - "target": "sklearn/sklearn.covariance._robust_covariance/MinCovDet/__init__" - }, "sklearn/sklearn.covariance._robust_covariance/MinCovDet/correct_covariance": { "target": "sklearn/sklearn.covariance._robust_covariance/MinCovDet/correct_covariance" }, - "sklearn/sklearn.covariance._robust_covariance/MinCovDet/fit": { - "target": "sklearn/sklearn.covariance._robust_covariance/MinCovDet/fit" - }, "sklearn/sklearn.covariance._robust_covariance/MinCovDet/reweight_covariance": { "target": "sklearn/sklearn.covariance._robust_covariance/MinCovDet/reweight_covariance" }, @@ -3495,21 +6584,6 @@ "sklearn/sklearn.covariance._robust_covariance/select_candidates": { "target": "sklearn/sklearn.covariance._robust_covariance/select_candidates" }, - "sklearn/sklearn.covariance._shrunk_covariance/LedoitWolf/__init__": { - "target": "sklearn/sklearn.covariance._shrunk_covariance/LedoitWolf/__init__" - }, - "sklearn/sklearn.covariance._shrunk_covariance/LedoitWolf/fit": { - "target": "sklearn/sklearn.covariance._shrunk_covariance/LedoitWolf/fit" - }, - "sklearn/sklearn.covariance._shrunk_covariance/OAS/fit": { - "target": "sklearn/sklearn.covariance._shrunk_covariance/OAS/fit" - }, - "sklearn/sklearn.covariance._shrunk_covariance/ShrunkCovariance/__init__": { - "target": "sklearn/sklearn.covariance._shrunk_covariance/ShrunkCovariance/__init__" - }, - "sklearn/sklearn.covariance._shrunk_covariance/ShrunkCovariance/fit": { - "target": "sklearn/sklearn.covariance._shrunk_covariance/ShrunkCovariance/fit" - }, "sklearn/sklearn.covariance._shrunk_covariance/ledoit_wolf_shrinkage": { "target": "sklearn/sklearn.covariance._shrunk_covariance/ledoit_wolf_shrinkage" }, @@ -3519,21 +6593,9 @@ "sklearn/sklearn.covariance._shrunk_covariance/shrunk_covariance": { "target": 
"sklearn/sklearn.covariance._shrunk_covariance/shrunk_covariance" }, - "sklearn/sklearn.cross_decomposition._pls/CCA/__init__": { - "target": "sklearn/sklearn.cross_decomposition._pls/CCA/__init__" - }, "sklearn/sklearn.cross_decomposition._pls/PLSCanonical/__init__": { "target": "sklearn/sklearn.cross_decomposition._pls/PLSCanonical/__init__" }, - "sklearn/sklearn.cross_decomposition._pls/PLSRegression/__init__": { - "target": "sklearn/sklearn.cross_decomposition._pls/PLSRegression/__init__" - }, - "sklearn/sklearn.cross_decomposition._pls/PLSRegression/fit": { - "target": "sklearn/sklearn.cross_decomposition._pls/PLSRegression/fit" - }, - "sklearn/sklearn.cross_decomposition._pls/PLSSVD/__init__": { - "target": "sklearn/sklearn.cross_decomposition._pls/PLSSVD/__init__" - }, "sklearn/sklearn.cross_decomposition._pls/PLSSVD/fit": { "target": "sklearn/sklearn.cross_decomposition._pls/PLSSVD/fit" }, @@ -3552,21 +6614,9 @@ "sklearn/sklearn.cross_decomposition._pls/_PLS/coef_@getter": { "target": "sklearn/sklearn.cross_decomposition._pls/_PLS/coef_@getter" }, - "sklearn/sklearn.cross_decomposition._pls/_PLS/fit": { - "target": "sklearn/sklearn.cross_decomposition._pls/_PLS/fit" - }, - "sklearn/sklearn.cross_decomposition._pls/_PLS/fit_transform": { - "target": "sklearn/sklearn.cross_decomposition._pls/_PLS/fit_transform" - }, "sklearn/sklearn.cross_decomposition._pls/_PLS/inverse_transform": { "target": "sklearn/sklearn.cross_decomposition._pls/_PLS/inverse_transform" }, - "sklearn/sklearn.cross_decomposition._pls/_PLS/predict": { - "target": "sklearn/sklearn.cross_decomposition._pls/_PLS/predict" - }, - "sklearn/sklearn.cross_decomposition._pls/_PLS/transform": { - "target": "sklearn/sklearn.cross_decomposition._pls/_PLS/transform" - }, "sklearn/sklearn.cross_decomposition._pls/_center_scale_xy": { "target": "sklearn/sklearn.cross_decomposition._pls/_center_scale_xy" }, @@ -3582,14 +6632,17 @@ "sklearn/sklearn.cross_decomposition._pls/_svd_flip_1d": { "target": "sklearn/sklearn.cross_decomposition._pls/_svd_flip_1d" }, - "sklearn/sklearn.datasets._arff_parser/_liac_arff_parser": { - "target": "sklearn/sklearn.datasets._arff_parser/_liac_arff_parser" + "sklearn/sklearn.datasets._arff_parser/_convert_arff_data": { + "target": "sklearn/sklearn.datasets._arff_parser/_convert_arff_data" }, - "sklearn/sklearn.datasets._arff_parser/_pandas_arff_parser": { - "target": "sklearn/sklearn.datasets._arff_parser/_pandas_arff_parser" + "sklearn/sklearn.datasets._arff_parser/_convert_arff_data_dataframe": { + "target": "sklearn/sklearn.datasets._arff_parser/_convert_arff_data_dataframe" }, - "sklearn/sklearn.datasets._arff_parser/_post_process_frame": { - "target": "sklearn/sklearn.datasets._arff_parser/_post_process_frame" + "sklearn/sklearn.datasets._arff_parser/_feature_to_dtype": { + "target": "sklearn/sklearn.datasets._arff_parser/_feature_to_dtype" + }, + "sklearn/sklearn.datasets._arff_parser/_liac_arff_parser": { + "target": "sklearn/sklearn.datasets._arff_parser/_liac_arff_parser" }, "sklearn/sklearn.datasets._arff_parser/_sparse_data_to_array": { "target": "sklearn/sklearn.datasets._arff_parser/_sparse_data_to_array" @@ -3597,9 +6650,6 @@ "sklearn/sklearn.datasets._arff_parser/_split_sparse_columns": { "target": "sklearn/sklearn.datasets._arff_parser/_split_sparse_columns" }, - "sklearn/sklearn.datasets._arff_parser/load_arff_from_gzip_file": { - "target": "sklearn/sklearn.datasets._arff_parser/load_arff_from_gzip_file" - }, "sklearn/sklearn.datasets._base/_convert_data_dataframe": { "target": 
"sklearn/sklearn.datasets._base/_convert_data_dataframe" }, @@ -3825,12 +6875,6 @@ "sklearn/sklearn.decomposition._base/_BasePCA/get_precision": { "target": "sklearn/sklearn.decomposition._base/_BasePCA/get_precision" }, - "sklearn/sklearn.decomposition._base/_BasePCA/inverse_transform": { - "target": "sklearn/sklearn.decomposition._base/_BasePCA/inverse_transform" - }, - "sklearn/sklearn.decomposition._base/_BasePCA/transform": { - "target": "sklearn/sklearn.decomposition._base/_BasePCA/transform" - }, "sklearn/sklearn.decomposition._dict_learning/DictionaryLearning/__init__": { "target": "sklearn/sklearn.decomposition._dict_learning/DictionaryLearning/__init__" }, @@ -3843,9 +6887,6 @@ "sklearn/sklearn.decomposition._dict_learning/DictionaryLearning/fit": { "target": "sklearn/sklearn.decomposition._dict_learning/DictionaryLearning/fit" }, - "sklearn/sklearn.decomposition._dict_learning/MiniBatchDictionaryLearning/__init__": { - "target": "sklearn/sklearn.decomposition._dict_learning/MiniBatchDictionaryLearning/__init__" - }, "sklearn/sklearn.decomposition._dict_learning/MiniBatchDictionaryLearning/_check_convergence": { "target": "sklearn/sklearn.decomposition._dict_learning/MiniBatchDictionaryLearning/_check_convergence" }, @@ -3909,9 +6950,6 @@ "sklearn/sklearn.decomposition._dict_learning/_BaseSparseCoding/_transform": { "target": "sklearn/sklearn.decomposition._dict_learning/_BaseSparseCoding/_transform" }, - "sklearn/sklearn.decomposition._dict_learning/_BaseSparseCoding/transform": { - "target": "sklearn/sklearn.decomposition._dict_learning/_BaseSparseCoding/transform" - }, "sklearn/sklearn.decomposition._dict_learning/_check_positive_coding": { "target": "sklearn/sklearn.decomposition._dict_learning/_check_positive_coding" }, @@ -3933,18 +6971,12 @@ "sklearn/sklearn.decomposition._dict_learning/sparse_encode": { "target": "sklearn/sklearn.decomposition._dict_learning/sparse_encode" }, - "sklearn/sklearn.decomposition._factor_analysis/FactorAnalysis/__init__": { - "target": "sklearn/sklearn.decomposition._factor_analysis/FactorAnalysis/__init__" - }, "sklearn/sklearn.decomposition._factor_analysis/FactorAnalysis/_n_features_out@getter": { "target": "sklearn/sklearn.decomposition._factor_analysis/FactorAnalysis/_n_features_out@getter" }, "sklearn/sklearn.decomposition._factor_analysis/FactorAnalysis/_rotate": { "target": "sklearn/sklearn.decomposition._factor_analysis/FactorAnalysis/_rotate" }, - "sklearn/sklearn.decomposition._factor_analysis/FactorAnalysis/fit": { - "target": "sklearn/sklearn.decomposition._factor_analysis/FactorAnalysis/fit" - }, "sklearn/sklearn.decomposition._factor_analysis/FactorAnalysis/get_covariance": { "target": "sklearn/sklearn.decomposition._factor_analysis/FactorAnalysis/get_covariance" }, @@ -3957,15 +6989,9 @@ "sklearn/sklearn.decomposition._factor_analysis/FactorAnalysis/score_samples": { "target": "sklearn/sklearn.decomposition._factor_analysis/FactorAnalysis/score_samples" }, - "sklearn/sklearn.decomposition._factor_analysis/FactorAnalysis/transform": { - "target": "sklearn/sklearn.decomposition._factor_analysis/FactorAnalysis/transform" - }, "sklearn/sklearn.decomposition._factor_analysis/_ortho_rotation": { "target": "sklearn/sklearn.decomposition._factor_analysis/_ortho_rotation" }, - "sklearn/sklearn.decomposition._fastica/FastICA/__init__": { - "target": "sklearn/sklearn.decomposition._fastica/FastICA/__init__" - }, "sklearn/sklearn.decomposition._fastica/FastICA/_fit": { "target": "sklearn/sklearn.decomposition._fastica/FastICA/_fit" }, @@ 
-3975,18 +7001,6 @@ "sklearn/sklearn.decomposition._fastica/FastICA/_n_features_out@getter": { "target": "sklearn/sklearn.decomposition._fastica/FastICA/_n_features_out@getter" }, - "sklearn/sklearn.decomposition._fastica/FastICA/fit": { - "target": "sklearn/sklearn.decomposition._fastica/FastICA/fit" - }, - "sklearn/sklearn.decomposition._fastica/FastICA/fit_transform": { - "target": "sklearn/sklearn.decomposition._fastica/FastICA/fit_transform" - }, - "sklearn/sklearn.decomposition._fastica/FastICA/inverse_transform": { - "target": "sklearn/sklearn.decomposition._fastica/FastICA/inverse_transform" - }, - "sklearn/sklearn.decomposition._fastica/FastICA/transform": { - "target": "sklearn/sklearn.decomposition._fastica/FastICA/transform" - }, "sklearn/sklearn.decomposition._fastica/_cube": { "target": "sklearn/sklearn.decomposition._fastica/_cube" }, @@ -4011,21 +7025,6 @@ "sklearn/sklearn.decomposition._fastica/fastica": { "target": "sklearn/sklearn.decomposition._fastica/fastica" }, - "sklearn/sklearn.decomposition._incremental_pca/IncrementalPCA/__init__": { - "target": "sklearn/sklearn.decomposition._incremental_pca/IncrementalPCA/__init__" - }, - "sklearn/sklearn.decomposition._incremental_pca/IncrementalPCA/fit": { - "target": "sklearn/sklearn.decomposition._incremental_pca/IncrementalPCA/fit" - }, - "sklearn/sklearn.decomposition._incremental_pca/IncrementalPCA/partial_fit": { - "target": "sklearn/sklearn.decomposition._incremental_pca/IncrementalPCA/partial_fit" - }, - "sklearn/sklearn.decomposition._incremental_pca/IncrementalPCA/transform": { - "target": "sklearn/sklearn.decomposition._incremental_pca/IncrementalPCA/transform" - }, - "sklearn/sklearn.decomposition._kernel_pca/KernelPCA/__init__": { - "target": "sklearn/sklearn.decomposition._kernel_pca/KernelPCA/__init__" - }, "sklearn/sklearn.decomposition._kernel_pca/KernelPCA/_fit_inverse_transform": { "target": "sklearn/sklearn.decomposition._kernel_pca/KernelPCA/_fit_inverse_transform" }, @@ -4044,24 +7043,12 @@ "sklearn/sklearn.decomposition._kernel_pca/KernelPCA/alphas_@getter": { "target": "sklearn/sklearn.decomposition._kernel_pca/KernelPCA/alphas_@getter" }, - "sklearn/sklearn.decomposition._kernel_pca/KernelPCA/fit": { - "target": "sklearn/sklearn.decomposition._kernel_pca/KernelPCA/fit" - }, - "sklearn/sklearn.decomposition._kernel_pca/KernelPCA/fit_transform": { - "target": "sklearn/sklearn.decomposition._kernel_pca/KernelPCA/fit_transform" - }, "sklearn/sklearn.decomposition._kernel_pca/KernelPCA/inverse_transform": { "target": "sklearn/sklearn.decomposition._kernel_pca/KernelPCA/inverse_transform" }, "sklearn/sklearn.decomposition._kernel_pca/KernelPCA/lambdas_@getter": { "target": "sklearn/sklearn.decomposition._kernel_pca/KernelPCA/lambdas_@getter" }, - "sklearn/sklearn.decomposition._kernel_pca/KernelPCA/transform": { - "target": "sklearn/sklearn.decomposition._kernel_pca/KernelPCA/transform" - }, - "sklearn/sklearn.decomposition._lda/LatentDirichletAllocation/__init__": { - "target": "sklearn/sklearn.decomposition._lda/LatentDirichletAllocation/__init__" - }, "sklearn/sklearn.decomposition._lda/LatentDirichletAllocation/_approx_bound": { "target": "sklearn/sklearn.decomposition._lda/LatentDirichletAllocation/_approx_bound" }, @@ -4092,20 +7079,8 @@ "sklearn/sklearn.decomposition._lda/LatentDirichletAllocation/_unnormalized_transform": { "target": "sklearn/sklearn.decomposition._lda/LatentDirichletAllocation/_unnormalized_transform" }, - "sklearn/sklearn.decomposition._lda/LatentDirichletAllocation/fit": { - 
"target": "sklearn/sklearn.decomposition._lda/LatentDirichletAllocation/fit" - }, "sklearn/sklearn.decomposition._lda/LatentDirichletAllocation/partial_fit": { - "target": "sklearn/sklearn.decomposition._lda/LatentDirichletAllocation/partial_fit" - }, - "sklearn/sklearn.decomposition._lda/LatentDirichletAllocation/perplexity": { - "target": "sklearn/sklearn.decomposition._lda/LatentDirichletAllocation/perplexity" - }, - "sklearn/sklearn.decomposition._lda/LatentDirichletAllocation/score": { - "target": "sklearn/sklearn.decomposition._lda/LatentDirichletAllocation/score" - }, - "sklearn/sklearn.decomposition._lda/LatentDirichletAllocation/transform": { - "target": "sklearn/sklearn.decomposition._lda/LatentDirichletAllocation/transform" + "target": "sklearn/sklearn.decomposition._lda/LatentDirichletAllocation/partial_fit" }, "sklearn/sklearn.decomposition._lda/_update_doc_distribution": { "target": "sklearn/sklearn.decomposition._lda/_update_doc_distribution" @@ -4137,9 +7112,6 @@ "sklearn/sklearn.decomposition._nmf/MiniBatchNMF/transform": { "target": "sklearn/sklearn.decomposition._nmf/MiniBatchNMF/transform" }, - "sklearn/sklearn.decomposition._nmf/NMF/__init__": { - "target": "sklearn/sklearn.decomposition._nmf/NMF/__init__" - }, "sklearn/sklearn.decomposition._nmf/NMF/_check_params": { "target": "sklearn/sklearn.decomposition._nmf/NMF/_check_params" }, @@ -4158,18 +7130,9 @@ "sklearn/sklearn.decomposition._nmf/NMF/_scale_regularization": { "target": "sklearn/sklearn.decomposition._nmf/NMF/_scale_regularization" }, - "sklearn/sklearn.decomposition._nmf/NMF/fit": { - "target": "sklearn/sklearn.decomposition._nmf/NMF/fit" - }, - "sklearn/sklearn.decomposition._nmf/NMF/fit_transform": { - "target": "sklearn/sklearn.decomposition._nmf/NMF/fit_transform" - }, "sklearn/sklearn.decomposition._nmf/NMF/inverse_transform": { "target": "sklearn/sklearn.decomposition._nmf/NMF/inverse_transform" }, - "sklearn/sklearn.decomposition._nmf/NMF/transform": { - "target": "sklearn/sklearn.decomposition._nmf/NMF/transform" - }, "sklearn/sklearn.decomposition._nmf/_beta_divergence": { "target": "sklearn/sklearn.decomposition._nmf/_beta_divergence" }, @@ -4212,9 +7175,6 @@ "sklearn/sklearn.decomposition._nmf/trace_dot": { "target": "sklearn/sklearn.decomposition._nmf/trace_dot" }, - "sklearn/sklearn.decomposition._pca/PCA/__init__": { - "target": "sklearn/sklearn.decomposition._pca/PCA/__init__" - }, "sklearn/sklearn.decomposition._pca/PCA/_fit": { "target": "sklearn/sklearn.decomposition._pca/PCA/_fit" }, @@ -4227,12 +7187,6 @@ "sklearn/sklearn.decomposition._pca/PCA/_more_tags": { "target": "sklearn/sklearn.decomposition._pca/PCA/_more_tags" }, - "sklearn/sklearn.decomposition._pca/PCA/fit": { - "target": "sklearn/sklearn.decomposition._pca/PCA/fit" - }, - "sklearn/sklearn.decomposition._pca/PCA/fit_transform": { - "target": "sklearn/sklearn.decomposition._pca/PCA/fit_transform" - }, "sklearn/sklearn.decomposition._pca/PCA/score": { "target": "sklearn/sklearn.decomposition._pca/PCA/score" }, @@ -4251,9 +7205,6 @@ "sklearn/sklearn.decomposition._sparse_pca/MiniBatchSparsePCA/fit": { "target": "sklearn/sklearn.decomposition._sparse_pca/MiniBatchSparsePCA/fit" }, - "sklearn/sklearn.decomposition._sparse_pca/SparsePCA/__init__": { - "target": "sklearn/sklearn.decomposition._sparse_pca/SparsePCA/__init__" - }, "sklearn/sklearn.decomposition._sparse_pca/SparsePCA/_more_tags": { "target": "sklearn/sklearn.decomposition._sparse_pca/SparsePCA/_more_tags" }, @@ -4263,30 +7214,12 @@ 
"sklearn/sklearn.decomposition._sparse_pca/SparsePCA/fit": { "target": "sklearn/sklearn.decomposition._sparse_pca/SparsePCA/fit" }, - "sklearn/sklearn.decomposition._sparse_pca/SparsePCA/transform": { - "target": "sklearn/sklearn.decomposition._sparse_pca/SparsePCA/transform" - }, - "sklearn/sklearn.decomposition._truncated_svd/TruncatedSVD/__init__": { - "target": "sklearn/sklearn.decomposition._truncated_svd/TruncatedSVD/__init__" - }, "sklearn/sklearn.decomposition._truncated_svd/TruncatedSVD/_more_tags": { "target": "sklearn/sklearn.decomposition._truncated_svd/TruncatedSVD/_more_tags" }, "sklearn/sklearn.decomposition._truncated_svd/TruncatedSVD/_n_features_out@getter": { "target": "sklearn/sklearn.decomposition._truncated_svd/TruncatedSVD/_n_features_out@getter" }, - "sklearn/sklearn.decomposition._truncated_svd/TruncatedSVD/fit": { - "target": "sklearn/sklearn.decomposition._truncated_svd/TruncatedSVD/fit" - }, - "sklearn/sklearn.decomposition._truncated_svd/TruncatedSVD/fit_transform": { - "target": "sklearn/sklearn.decomposition._truncated_svd/TruncatedSVD/fit_transform" - }, - "sklearn/sklearn.decomposition._truncated_svd/TruncatedSVD/inverse_transform": { - "target": "sklearn/sklearn.decomposition._truncated_svd/TruncatedSVD/inverse_transform" - }, - "sklearn/sklearn.decomposition._truncated_svd/TruncatedSVD/transform": { - "target": "sklearn/sklearn.decomposition._truncated_svd/TruncatedSVD/transform" - }, "sklearn/sklearn.decomposition.setup/configuration": { "target": "sklearn/sklearn.decomposition.setup/configuration" }, @@ -4338,9 +7271,6 @@ "sklearn/sklearn.dummy/DummyRegressor/n_features_in_@getter": { "target": "sklearn/sklearn.dummy/DummyRegressor/n_features_in_@getter" }, - "sklearn/sklearn.ensemble._bagging/BaggingClassifier/__init__": { - "target": "sklearn/sklearn.ensemble._bagging/BaggingClassifier/__init__" - }, "sklearn/sklearn.ensemble._bagging/BaggingClassifier/_set_oob_score": { "target": "sklearn/sklearn.ensemble._bagging/BaggingClassifier/_set_oob_score" }, @@ -4353,27 +7283,15 @@ "sklearn/sklearn.ensemble._bagging/BaggingClassifier/decision_function": { "target": "sklearn/sklearn.ensemble._bagging/BaggingClassifier/decision_function" }, - "sklearn/sklearn.ensemble._bagging/BaggingClassifier/predict": { - "target": "sklearn/sklearn.ensemble._bagging/BaggingClassifier/predict" - }, "sklearn/sklearn.ensemble._bagging/BaggingClassifier/predict_log_proba": { "target": "sklearn/sklearn.ensemble._bagging/BaggingClassifier/predict_log_proba" }, - "sklearn/sklearn.ensemble._bagging/BaggingClassifier/predict_proba": { - "target": "sklearn/sklearn.ensemble._bagging/BaggingClassifier/predict_proba" - }, - "sklearn/sklearn.ensemble._bagging/BaggingRegressor/__init__": { - "target": "sklearn/sklearn.ensemble._bagging/BaggingRegressor/__init__" - }, "sklearn/sklearn.ensemble._bagging/BaggingRegressor/_set_oob_score": { "target": "sklearn/sklearn.ensemble._bagging/BaggingRegressor/_set_oob_score" }, "sklearn/sklearn.ensemble._bagging/BaggingRegressor/_validate_estimator": { "target": "sklearn/sklearn.ensemble._bagging/BaggingRegressor/_validate_estimator" }, - "sklearn/sklearn.ensemble._bagging/BaggingRegressor/predict": { - "target": "sklearn/sklearn.ensemble._bagging/BaggingRegressor/predict" - }, "sklearn/sklearn.ensemble._bagging/BaseBagging/__init__": { "target": "sklearn/sklearn.ensemble._bagging/BaseBagging/__init__" }, @@ -4395,9 +7313,6 @@ "sklearn/sklearn.ensemble._bagging/BaseBagging/estimators_samples_@getter": { "target": 
"sklearn/sklearn.ensemble._bagging/BaseBagging/estimators_samples_@getter" }, - "sklearn/sklearn.ensemble._bagging/BaseBagging/fit": { - "target": "sklearn/sklearn.ensemble._bagging/BaseBagging/fit" - }, "sklearn/sklearn.ensemble._bagging/BaseBagging/n_features_@getter": { "target": "sklearn/sklearn.ensemble._bagging/BaseBagging/n_features_@getter" }, @@ -4482,27 +7397,15 @@ "sklearn/sklearn.ensemble._forest/BaseForest/_validate_y_class_weight": { "target": "sklearn/sklearn.ensemble._forest/BaseForest/_validate_y_class_weight" }, - "sklearn/sklearn.ensemble._forest/BaseForest/apply": { - "target": "sklearn/sklearn.ensemble._forest/BaseForest/apply" - }, "sklearn/sklearn.ensemble._forest/BaseForest/decision_path": { "target": "sklearn/sklearn.ensemble._forest/BaseForest/decision_path" }, "sklearn/sklearn.ensemble._forest/BaseForest/feature_importances_@getter": { "target": "sklearn/sklearn.ensemble._forest/BaseForest/feature_importances_@getter" }, - "sklearn/sklearn.ensemble._forest/BaseForest/fit": { - "target": "sklearn/sklearn.ensemble._forest/BaseForest/fit" - }, "sklearn/sklearn.ensemble._forest/BaseForest/n_features_@getter": { "target": "sklearn/sklearn.ensemble._forest/BaseForest/n_features_@getter" }, - "sklearn/sklearn.ensemble._forest/ExtraTreesClassifier/__init__": { - "target": "sklearn/sklearn.ensemble._forest/ExtraTreesClassifier/__init__" - }, - "sklearn/sklearn.ensemble._forest/ExtraTreesRegressor/__init__": { - "target": "sklearn/sklearn.ensemble._forest/ExtraTreesRegressor/__init__" - }, "sklearn/sklearn.ensemble._forest/ForestClassifier/__init__": { "target": "sklearn/sklearn.ensemble._forest/ForestClassifier/__init__" }, @@ -4518,15 +7421,6 @@ "sklearn/sklearn.ensemble._forest/ForestClassifier/_validate_y_class_weight": { "target": "sklearn/sklearn.ensemble._forest/ForestClassifier/_validate_y_class_weight" }, - "sklearn/sklearn.ensemble._forest/ForestClassifier/predict": { - "target": "sklearn/sklearn.ensemble._forest/ForestClassifier/predict" - }, - "sklearn/sklearn.ensemble._forest/ForestClassifier/predict_log_proba": { - "target": "sklearn/sklearn.ensemble._forest/ForestClassifier/predict_log_proba" - }, - "sklearn/sklearn.ensemble._forest/ForestClassifier/predict_proba": { - "target": "sklearn/sklearn.ensemble._forest/ForestClassifier/predict_proba" - }, "sklearn/sklearn.ensemble._forest/ForestRegressor/__init__": { "target": "sklearn/sklearn.ensemble._forest/ForestRegressor/__init__" }, @@ -4542,33 +7436,15 @@ "sklearn/sklearn.ensemble._forest/ForestRegressor/_set_oob_score_and_attributes": { "target": "sklearn/sklearn.ensemble._forest/ForestRegressor/_set_oob_score_and_attributes" }, - "sklearn/sklearn.ensemble._forest/ForestRegressor/predict": { - "target": "sklearn/sklearn.ensemble._forest/ForestRegressor/predict" - }, - "sklearn/sklearn.ensemble._forest/RandomForestClassifier/__init__": { - "target": "sklearn/sklearn.ensemble._forest/RandomForestClassifier/__init__" - }, - "sklearn/sklearn.ensemble._forest/RandomForestRegressor/__init__": { - "target": "sklearn/sklearn.ensemble._forest/RandomForestRegressor/__init__" - }, - "sklearn/sklearn.ensemble._forest/RandomTreesEmbedding/__init__": { - "target": "sklearn/sklearn.ensemble._forest/RandomTreesEmbedding/__init__" - }, "sklearn/sklearn.ensemble._forest/RandomTreesEmbedding/_set_oob_score_and_attributes": { "target": "sklearn/sklearn.ensemble._forest/RandomTreesEmbedding/_set_oob_score_and_attributes" }, - "sklearn/sklearn.ensemble._forest/RandomTreesEmbedding/fit": { - "target": 
"sklearn/sklearn.ensemble._forest/RandomTreesEmbedding/fit" - }, "sklearn/sklearn.ensemble._forest/RandomTreesEmbedding/fit_transform": { "target": "sklearn/sklearn.ensemble._forest/RandomTreesEmbedding/fit_transform" }, "sklearn/sklearn.ensemble._forest/RandomTreesEmbedding/get_feature_names_out": { "target": "sklearn/sklearn.ensemble._forest/RandomTreesEmbedding/get_feature_names_out" }, - "sklearn/sklearn.ensemble._forest/RandomTreesEmbedding/transform": { - "target": "sklearn/sklearn.ensemble._forest/RandomTreesEmbedding/transform" - }, "sklearn/sklearn.ensemble._forest/_accumulate_prediction": { "target": "sklearn/sklearn.ensemble._forest/_accumulate_prediction" }, @@ -4629,63 +7505,27 @@ "sklearn/sklearn.ensemble._gb/BaseGradientBoosting/_validate_y": { "target": "sklearn/sklearn.ensemble._gb/BaseGradientBoosting/_validate_y" }, - "sklearn/sklearn.ensemble._gb/BaseGradientBoosting/apply": { - "target": "sklearn/sklearn.ensemble._gb/BaseGradientBoosting/apply" - }, "sklearn/sklearn.ensemble._gb/BaseGradientBoosting/feature_importances_@getter": { "target": "sklearn/sklearn.ensemble._gb/BaseGradientBoosting/feature_importances_@getter" }, - "sklearn/sklearn.ensemble._gb/BaseGradientBoosting/fit": { - "target": "sklearn/sklearn.ensemble._gb/BaseGradientBoosting/fit" - }, "sklearn/sklearn.ensemble._gb/BaseGradientBoosting/loss_@getter": { "target": "sklearn/sklearn.ensemble._gb/BaseGradientBoosting/loss_@getter" }, "sklearn/sklearn.ensemble._gb/BaseGradientBoosting/n_features_@getter": { "target": "sklearn/sklearn.ensemble._gb/BaseGradientBoosting/n_features_@getter" }, - "sklearn/sklearn.ensemble._gb/GradientBoostingClassifier/__init__": { - "target": "sklearn/sklearn.ensemble._gb/GradientBoostingClassifier/__init__" - }, "sklearn/sklearn.ensemble._gb/GradientBoostingClassifier/_validate_y": { "target": "sklearn/sklearn.ensemble._gb/GradientBoostingClassifier/_validate_y" }, "sklearn/sklearn.ensemble._gb/GradientBoostingClassifier/decision_function": { "target": "sklearn/sklearn.ensemble._gb/GradientBoostingClassifier/decision_function" }, - "sklearn/sklearn.ensemble._gb/GradientBoostingClassifier/predict": { - "target": "sklearn/sklearn.ensemble._gb/GradientBoostingClassifier/predict" - }, - "sklearn/sklearn.ensemble._gb/GradientBoostingClassifier/predict_log_proba": { - "target": "sklearn/sklearn.ensemble._gb/GradientBoostingClassifier/predict_log_proba" - }, - "sklearn/sklearn.ensemble._gb/GradientBoostingClassifier/predict_proba": { - "target": "sklearn/sklearn.ensemble._gb/GradientBoostingClassifier/predict_proba" - }, - "sklearn/sklearn.ensemble._gb/GradientBoostingClassifier/staged_decision_function": { - "target": "sklearn/sklearn.ensemble._gb/GradientBoostingClassifier/staged_decision_function" - }, - "sklearn/sklearn.ensemble._gb/GradientBoostingClassifier/staged_predict": { - "target": "sklearn/sklearn.ensemble._gb/GradientBoostingClassifier/staged_predict" - }, - "sklearn/sklearn.ensemble._gb/GradientBoostingClassifier/staged_predict_proba": { - "target": "sklearn/sklearn.ensemble._gb/GradientBoostingClassifier/staged_predict_proba" - }, - "sklearn/sklearn.ensemble._gb/GradientBoostingRegressor/__init__": { - "target": "sklearn/sklearn.ensemble._gb/GradientBoostingRegressor/__init__" - }, "sklearn/sklearn.ensemble._gb/GradientBoostingRegressor/_validate_y": { "target": "sklearn/sklearn.ensemble._gb/GradientBoostingRegressor/_validate_y" }, "sklearn/sklearn.ensemble._gb/GradientBoostingRegressor/apply": { "target": 
"sklearn/sklearn.ensemble._gb/GradientBoostingRegressor/apply" }, - "sklearn/sklearn.ensemble._gb/GradientBoostingRegressor/predict": { - "target": "sklearn/sklearn.ensemble._gb/GradientBoostingRegressor/predict" - }, - "sklearn/sklearn.ensemble._gb/GradientBoostingRegressor/staged_predict": { - "target": "sklearn/sklearn.ensemble._gb/GradientBoostingRegressor/staged_predict" - }, "sklearn/sklearn.ensemble._gb/VerboseReporter/__init__": { "target": "sklearn/sklearn.ensemble._gb/VerboseReporter/__init__" }, @@ -4932,15 +7772,9 @@ "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/BaseHistGradientBoosting/_validate_parameters": { "target": "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/BaseHistGradientBoosting/_validate_parameters" }, - "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/BaseHistGradientBoosting/fit": { - "target": "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/BaseHistGradientBoosting/fit" - }, "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/BaseHistGradientBoosting/n_iter_@getter": { "target": "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/BaseHistGradientBoosting/n_iter_@getter" }, - "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/HistGradientBoostingClassifier/__init__": { - "target": "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/HistGradientBoostingClassifier/__init__" - }, "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/HistGradientBoostingClassifier/_encode_y": { "target": "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/HistGradientBoostingClassifier/_encode_y" }, @@ -4950,12 +7784,6 @@ "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/HistGradientBoostingClassifier/decision_function": { "target": "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/HistGradientBoostingClassifier/decision_function" }, - "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/HistGradientBoostingClassifier/predict": { - "target": "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/HistGradientBoostingClassifier/predict" - }, - "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/HistGradientBoostingClassifier/predict_proba": { - "target": "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/HistGradientBoostingClassifier/predict_proba" - }, "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/HistGradientBoostingClassifier/staged_decision_function": { "target": "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/HistGradientBoostingClassifier/staged_decision_function" }, @@ -4965,18 +7793,12 @@ "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/HistGradientBoostingClassifier/staged_predict_proba": { "target": "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/HistGradientBoostingClassifier/staged_predict_proba" }, - "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/HistGradientBoostingRegressor/__init__": { - "target": "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/HistGradientBoostingRegressor/__init__" - }, "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/HistGradientBoostingRegressor/_encode_y": { "target": "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/HistGradientBoostingRegressor/_encode_y" }, 
"sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/HistGradientBoostingRegressor/_get_loss": { "target": "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/HistGradientBoostingRegressor/_get_loss" }, - "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/HistGradientBoostingRegressor/predict": { - "target": "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/HistGradientBoostingRegressor/predict" - }, "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/HistGradientBoostingRegressor/staged_predict": { "target": "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/HistGradientBoostingRegressor/staged_predict" }, @@ -5043,9 +7865,6 @@ "sklearn/sklearn.ensemble._hist_gradient_boosting.predictor/TreePredictor/predict_binned": { "target": "sklearn/sklearn.ensemble._hist_gradient_boosting.predictor/TreePredictor/predict_binned" }, - "sklearn/sklearn.ensemble._iforest/IsolationForest/__init__": { - "target": "sklearn/sklearn.ensemble._iforest/IsolationForest/__init__" - }, "sklearn/sklearn.ensemble._iforest/IsolationForest/_compute_chunked_score_samples": { "target": "sklearn/sklearn.ensemble._iforest/IsolationForest/_compute_chunked_score_samples" }, @@ -5061,24 +7880,9 @@ "sklearn/sklearn.ensemble._iforest/IsolationForest/_set_oob_score": { "target": "sklearn/sklearn.ensemble._iforest/IsolationForest/_set_oob_score" }, - "sklearn/sklearn.ensemble._iforest/IsolationForest/decision_function": { - "target": "sklearn/sklearn.ensemble._iforest/IsolationForest/decision_function" - }, - "sklearn/sklearn.ensemble._iforest/IsolationForest/fit": { - "target": "sklearn/sklearn.ensemble._iforest/IsolationForest/fit" - }, - "sklearn/sklearn.ensemble._iforest/IsolationForest/predict": { - "target": "sklearn/sklearn.ensemble._iforest/IsolationForest/predict" - }, - "sklearn/sklearn.ensemble._iforest/IsolationForest/score_samples": { - "target": "sklearn/sklearn.ensemble._iforest/IsolationForest/score_samples" - }, "sklearn/sklearn.ensemble._iforest/_average_path_length": { "target": "sklearn/sklearn.ensemble._iforest/_average_path_length" }, - "sklearn/sklearn.ensemble._stacking/StackingClassifier/__init__": { - "target": "sklearn/sklearn.ensemble._stacking/StackingClassifier/__init__" - }, "sklearn/sklearn.ensemble._stacking/StackingClassifier/_sk_visual_block_": { "target": "sklearn/sklearn.ensemble._stacking/StackingClassifier/_sk_visual_block_" }, @@ -5088,30 +7892,15 @@ "sklearn/sklearn.ensemble._stacking/StackingClassifier/decision_function": { "target": "sklearn/sklearn.ensemble._stacking/StackingClassifier/decision_function" }, - "sklearn/sklearn.ensemble._stacking/StackingClassifier/fit": { - "target": "sklearn/sklearn.ensemble._stacking/StackingClassifier/fit" - }, - "sklearn/sklearn.ensemble._stacking/StackingClassifier/predict": { - "target": "sklearn/sklearn.ensemble._stacking/StackingClassifier/predict" - }, - "sklearn/sklearn.ensemble._stacking/StackingClassifier/predict_proba": { - "target": "sklearn/sklearn.ensemble._stacking/StackingClassifier/predict_proba" - }, "sklearn/sklearn.ensemble._stacking/StackingClassifier/transform": { "target": "sklearn/sklearn.ensemble._stacking/StackingClassifier/transform" }, - "sklearn/sklearn.ensemble._stacking/StackingRegressor/__init__": { - "target": "sklearn/sklearn.ensemble._stacking/StackingRegressor/__init__" - }, "sklearn/sklearn.ensemble._stacking/StackingRegressor/_sk_visual_block_": { "target": 
"sklearn/sklearn.ensemble._stacking/StackingRegressor/_sk_visual_block_" }, "sklearn/sklearn.ensemble._stacking/StackingRegressor/_validate_final_estimator": { "target": "sklearn/sklearn.ensemble._stacking/StackingRegressor/_validate_final_estimator" }, - "sklearn/sklearn.ensemble._stacking/StackingRegressor/fit": { - "target": "sklearn/sklearn.ensemble._stacking/StackingRegressor/fit" - }, "sklearn/sklearn.ensemble._stacking/StackingRegressor/transform": { "target": "sklearn/sklearn.ensemble._stacking/StackingRegressor/transform" }, @@ -5142,48 +7931,24 @@ "sklearn/sklearn.ensemble._stacking/_BaseStacking/n_features_in_@getter": { "target": "sklearn/sklearn.ensemble._stacking/_BaseStacking/n_features_in_@getter" }, - "sklearn/sklearn.ensemble._stacking/_BaseStacking/predict": { - "target": "sklearn/sklearn.ensemble._stacking/_BaseStacking/predict" - }, "sklearn/sklearn.ensemble._stacking/_estimator_has": { "target": "sklearn/sklearn.ensemble._stacking/_estimator_has" }, - "sklearn/sklearn.ensemble._voting/VotingClassifier/__init__": { - "target": "sklearn/sklearn.ensemble._voting/VotingClassifier/__init__" - }, "sklearn/sklearn.ensemble._voting/VotingClassifier/_check_voting": { "target": "sklearn/sklearn.ensemble._voting/VotingClassifier/_check_voting" }, "sklearn/sklearn.ensemble._voting/VotingClassifier/_collect_probas": { "target": "sklearn/sklearn.ensemble._voting/VotingClassifier/_collect_probas" }, - "sklearn/sklearn.ensemble._voting/VotingClassifier/fit": { - "target": "sklearn/sklearn.ensemble._voting/VotingClassifier/fit" - }, "sklearn/sklearn.ensemble._voting/VotingClassifier/get_feature_names_out": { "target": "sklearn/sklearn.ensemble._voting/VotingClassifier/get_feature_names_out" }, - "sklearn/sklearn.ensemble._voting/VotingClassifier/predict": { - "target": "sklearn/sklearn.ensemble._voting/VotingClassifier/predict" - }, - "sklearn/sklearn.ensemble._voting/VotingClassifier/predict_proba": { - "target": "sklearn/sklearn.ensemble._voting/VotingClassifier/predict_proba" - }, "sklearn/sklearn.ensemble._voting/VotingClassifier/transform": { "target": "sklearn/sklearn.ensemble._voting/VotingClassifier/transform" }, - "sklearn/sklearn.ensemble._voting/VotingRegressor/__init__": { - "target": "sklearn/sklearn.ensemble._voting/VotingRegressor/__init__" - }, - "sklearn/sklearn.ensemble._voting/VotingRegressor/fit": { - "target": "sklearn/sklearn.ensemble._voting/VotingRegressor/fit" - }, "sklearn/sklearn.ensemble._voting/VotingRegressor/get_feature_names_out": { "target": "sklearn/sklearn.ensemble._voting/VotingRegressor/get_feature_names_out" }, - "sklearn/sklearn.ensemble._voting/VotingRegressor/predict": { - "target": "sklearn/sklearn.ensemble._voting/VotingRegressor/predict" - }, "sklearn/sklearn.ensemble._voting/VotingRegressor/transform": { "target": "sklearn/sklearn.ensemble._voting/VotingRegressor/transform" }, @@ -5202,18 +7967,12 @@ "sklearn/sklearn.ensemble._voting/_BaseVoting/_weights_not_none@getter": { "target": "sklearn/sklearn.ensemble._voting/_BaseVoting/_weights_not_none@getter" }, - "sklearn/sklearn.ensemble._voting/_BaseVoting/fit": { - "target": "sklearn/sklearn.ensemble._voting/_BaseVoting/fit" - }, "sklearn/sklearn.ensemble._voting/_BaseVoting/fit_transform": { "target": "sklearn/sklearn.ensemble._voting/_BaseVoting/fit_transform" }, "sklearn/sklearn.ensemble._voting/_BaseVoting/n_features_in_@getter": { "target": "sklearn/sklearn.ensemble._voting/_BaseVoting/n_features_in_@getter" }, - "sklearn/sklearn.ensemble._weight_boosting/AdaBoostClassifier/__init__": { 
- "target": "sklearn/sklearn.ensemble._weight_boosting/AdaBoostClassifier/__init__" - }, "sklearn/sklearn.ensemble._weight_boosting/AdaBoostClassifier/_boost": { "target": "sklearn/sklearn.ensemble._weight_boosting/AdaBoostClassifier/_boost" }, @@ -5232,30 +7991,15 @@ "sklearn/sklearn.ensemble._weight_boosting/AdaBoostClassifier/decision_function": { "target": "sklearn/sklearn.ensemble._weight_boosting/AdaBoostClassifier/decision_function" }, - "sklearn/sklearn.ensemble._weight_boosting/AdaBoostClassifier/fit": { - "target": "sklearn/sklearn.ensemble._weight_boosting/AdaBoostClassifier/fit" - }, - "sklearn/sklearn.ensemble._weight_boosting/AdaBoostClassifier/predict": { - "target": "sklearn/sklearn.ensemble._weight_boosting/AdaBoostClassifier/predict" - }, "sklearn/sklearn.ensemble._weight_boosting/AdaBoostClassifier/predict_log_proba": { "target": "sklearn/sklearn.ensemble._weight_boosting/AdaBoostClassifier/predict_log_proba" }, - "sklearn/sklearn.ensemble._weight_boosting/AdaBoostClassifier/predict_proba": { - "target": "sklearn/sklearn.ensemble._weight_boosting/AdaBoostClassifier/predict_proba" - }, "sklearn/sklearn.ensemble._weight_boosting/AdaBoostClassifier/staged_decision_function": { "target": "sklearn/sklearn.ensemble._weight_boosting/AdaBoostClassifier/staged_decision_function" }, - "sklearn/sklearn.ensemble._weight_boosting/AdaBoostClassifier/staged_predict": { - "target": "sklearn/sklearn.ensemble._weight_boosting/AdaBoostClassifier/staged_predict" - }, "sklearn/sklearn.ensemble._weight_boosting/AdaBoostClassifier/staged_predict_proba": { "target": "sklearn/sklearn.ensemble._weight_boosting/AdaBoostClassifier/staged_predict_proba" }, - "sklearn/sklearn.ensemble._weight_boosting/AdaBoostRegressor/__init__": { - "target": "sklearn/sklearn.ensemble._weight_boosting/AdaBoostRegressor/__init__" - }, "sklearn/sklearn.ensemble._weight_boosting/AdaBoostRegressor/_boost": { "target": "sklearn/sklearn.ensemble._weight_boosting/AdaBoostRegressor/_boost" }, @@ -5265,12 +8009,6 @@ "sklearn/sklearn.ensemble._weight_boosting/AdaBoostRegressor/_validate_estimator": { "target": "sklearn/sklearn.ensemble._weight_boosting/AdaBoostRegressor/_validate_estimator" }, - "sklearn/sklearn.ensemble._weight_boosting/AdaBoostRegressor/fit": { - "target": "sklearn/sklearn.ensemble._weight_boosting/AdaBoostRegressor/fit" - }, - "sklearn/sklearn.ensemble._weight_boosting/AdaBoostRegressor/predict": { - "target": "sklearn/sklearn.ensemble._weight_boosting/AdaBoostRegressor/predict" - }, "sklearn/sklearn.ensemble._weight_boosting/AdaBoostRegressor/staged_predict": { "target": "sklearn/sklearn.ensemble._weight_boosting/AdaBoostRegressor/staged_predict" }, @@ -5643,15 +8381,9 @@ "sklearn/sklearn.externals._packaging.version/_parse_version_parts": { "target": "sklearn/sklearn.externals._packaging.version/_parse_version_parts" }, - "sklearn/sklearn.externals._packaging.version/parse": { - "target": "sklearn/sklearn.externals._packaging.version/parse" - }, "sklearn/sklearn.externals.conftest/pytest_ignore_collect": { "target": "sklearn/sklearn.externals.conftest/pytest_ignore_collect" }, - "sklearn/sklearn.feature_extraction._dict_vectorizer/DictVectorizer/__init__": { - "target": "sklearn/sklearn.feature_extraction._dict_vectorizer/DictVectorizer/__init__" - }, "sklearn/sklearn.feature_extraction._dict_vectorizer/DictVectorizer/_add_iterable_element": { "target": "sklearn/sklearn.feature_extraction._dict_vectorizer/DictVectorizer/_add_iterable_element" }, @@ -5661,15 +8393,6 @@ 
"sklearn/sklearn.feature_extraction._dict_vectorizer/DictVectorizer/_transform": { "target": "sklearn/sklearn.feature_extraction._dict_vectorizer/DictVectorizer/_transform" }, - "sklearn/sklearn.feature_extraction._dict_vectorizer/DictVectorizer/fit": { - "target": "sklearn/sklearn.feature_extraction._dict_vectorizer/DictVectorizer/fit" - }, - "sklearn/sklearn.feature_extraction._dict_vectorizer/DictVectorizer/fit_transform": { - "target": "sklearn/sklearn.feature_extraction._dict_vectorizer/DictVectorizer/fit_transform" - }, - "sklearn/sklearn.feature_extraction._dict_vectorizer/DictVectorizer/get_feature_names": { - "target": "sklearn/sklearn.feature_extraction._dict_vectorizer/DictVectorizer/get_feature_names" - }, "sklearn/sklearn.feature_extraction._dict_vectorizer/DictVectorizer/get_feature_names_out": { "target": "sklearn/sklearn.feature_extraction._dict_vectorizer/DictVectorizer/get_feature_names_out" }, @@ -5679,15 +8402,9 @@ "sklearn/sklearn.feature_extraction._dict_vectorizer/DictVectorizer/restrict": { "target": "sklearn/sklearn.feature_extraction._dict_vectorizer/DictVectorizer/restrict" }, - "sklearn/sklearn.feature_extraction._dict_vectorizer/DictVectorizer/transform": { - "target": "sklearn/sklearn.feature_extraction._dict_vectorizer/DictVectorizer/transform" - }, "sklearn/sklearn.feature_extraction._dict_vectorizer/_tosequence": { "target": "sklearn/sklearn.feature_extraction._dict_vectorizer/_tosequence" }, - "sklearn/sklearn.feature_extraction._hash/FeatureHasher/__init__": { - "target": "sklearn/sklearn.feature_extraction._hash/FeatureHasher/__init__" - }, "sklearn/sklearn.feature_extraction._hash/FeatureHasher/_more_tags": { "target": "sklearn/sklearn.feature_extraction._hash/FeatureHasher/_more_tags" }, @@ -5697,9 +8414,6 @@ "sklearn/sklearn.feature_extraction._hash/FeatureHasher/fit": { "target": "sklearn/sklearn.feature_extraction._hash/FeatureHasher/fit" }, - "sklearn/sklearn.feature_extraction._hash/FeatureHasher/transform": { - "target": "sklearn/sklearn.feature_extraction._hash/FeatureHasher/transform" - }, "sklearn/sklearn.feature_extraction._hash/_iteritems": { "target": "sklearn/sklearn.feature_extraction._hash/_iteritems" }, @@ -5805,39 +8519,15 @@ "sklearn/sklearn.feature_extraction.text/_VectorizerMixin/_validate_params": { "target": "sklearn/sklearn.feature_extraction.text/_VectorizerMixin/_validate_params" }, - "sklearn/sklearn.feature_extraction.text/_VectorizerMixin/_validate_vocabulary": { - "target": "sklearn/sklearn.feature_extraction.text/_VectorizerMixin/_validate_vocabulary" - }, "sklearn/sklearn.feature_extraction.text/_VectorizerMixin/_warn_for_unused_params": { "target": "sklearn/sklearn.feature_extraction.text/_VectorizerMixin/_warn_for_unused_params" }, - "sklearn/sklearn.feature_extraction.text/_VectorizerMixin/_word_ngrams": { - "target": "sklearn/sklearn.feature_extraction.text/_VectorizerMixin/_word_ngrams" - }, - "sklearn/sklearn.feature_extraction.text/_VectorizerMixin/build_analyzer": { - "target": "sklearn/sklearn.feature_extraction.text/_VectorizerMixin/build_analyzer" - }, - "sklearn/sklearn.feature_extraction.text/_VectorizerMixin/build_preprocessor": { - "target": "sklearn/sklearn.feature_extraction.text/_VectorizerMixin/build_preprocessor" - }, - "sklearn/sklearn.feature_extraction.text/_VectorizerMixin/build_tokenizer": { - "target": "sklearn/sklearn.feature_extraction.text/_VectorizerMixin/build_tokenizer" - }, - "sklearn/sklearn.feature_extraction.text/_VectorizerMixin/decode": { - "target": 
"sklearn/sklearn.feature_extraction.text/_VectorizerMixin/decode" - }, - "sklearn/sklearn.feature_extraction.text/_VectorizerMixin/get_stop_words": { - "target": "sklearn/sklearn.feature_extraction.text/_VectorizerMixin/get_stop_words" - }, "sklearn/sklearn.feature_extraction.text/_analyze": { "target": "sklearn/sklearn.feature_extraction.text/_analyze" }, "sklearn/sklearn.feature_extraction.text/_check_stop_list": { "target": "sklearn/sklearn.feature_extraction.text/_check_stop_list" }, - "sklearn/sklearn.feature_extraction.text/_document_frequency": { - "target": "sklearn/sklearn.feature_extraction.text/_document_frequency" - }, "sklearn/sklearn.feature_extraction.text/_make_int_array": { "target": "sklearn/sklearn.feature_extraction.text/_make_int_array" }, @@ -5862,21 +8552,9 @@ "sklearn/sklearn.feature_selection._base/SelectorMixin/get_feature_names_out": { "target": "sklearn/sklearn.feature_selection._base/SelectorMixin/get_feature_names_out" }, - "sklearn/sklearn.feature_selection._base/SelectorMixin/get_support": { - "target": "sklearn/sklearn.feature_selection._base/SelectorMixin/get_support" - }, - "sklearn/sklearn.feature_selection._base/SelectorMixin/inverse_transform": { - "target": "sklearn/sklearn.feature_selection._base/SelectorMixin/inverse_transform" - }, - "sklearn/sklearn.feature_selection._base/SelectorMixin/transform": { - "target": "sklearn/sklearn.feature_selection._base/SelectorMixin/transform" - }, "sklearn/sklearn.feature_selection._base/_get_feature_importances": { "target": "sklearn/sklearn.feature_selection._base/_get_feature_importances" }, - "sklearn/sklearn.feature_selection._from_model/SelectFromModel/__init__": { - "target": "sklearn/sklearn.feature_selection._from_model/SelectFromModel/__init__" - }, "sklearn/sklearn.feature_selection._from_model/SelectFromModel/_check_max_features": { "target": "sklearn/sklearn.feature_selection._from_model/SelectFromModel/_check_max_features" }, @@ -5886,9 +8564,6 @@ "sklearn/sklearn.feature_selection._from_model/SelectFromModel/_more_tags": { "target": "sklearn/sklearn.feature_selection._from_model/SelectFromModel/_more_tags" }, - "sklearn/sklearn.feature_selection._from_model/SelectFromModel/fit": { - "target": "sklearn/sklearn.feature_selection._from_model/SelectFromModel/fit" - }, "sklearn/sklearn.feature_selection._from_model/SelectFromModel/n_features_in_@getter": { "target": "sklearn/sklearn.feature_selection._from_model/SelectFromModel/n_features_in_@getter" }, @@ -5919,9 +8594,6 @@ "sklearn/sklearn.feature_selection._mutual_info/_iterate_columns": { "target": "sklearn/sklearn.feature_selection._mutual_info/_iterate_columns" }, - "sklearn/sklearn.feature_selection._rfe/RFE/__init__": { - "target": "sklearn/sklearn.feature_selection._rfe/RFE/__init__" - }, "sklearn/sklearn.feature_selection._rfe/RFE/_estimator_type@getter": { "target": "sklearn/sklearn.feature_selection._rfe/RFE/_estimator_type@getter" }, @@ -5940,27 +8612,9 @@ "sklearn/sklearn.feature_selection._rfe/RFE/decision_function": { "target": "sklearn/sklearn.feature_selection._rfe/RFE/decision_function" }, - "sklearn/sklearn.feature_selection._rfe/RFE/fit": { - "target": "sklearn/sklearn.feature_selection._rfe/RFE/fit" - }, - "sklearn/sklearn.feature_selection._rfe/RFE/predict": { - "target": "sklearn/sklearn.feature_selection._rfe/RFE/predict" - }, "sklearn/sklearn.feature_selection._rfe/RFE/predict_log_proba": { "target": "sklearn/sklearn.feature_selection._rfe/RFE/predict_log_proba" }, - "sklearn/sklearn.feature_selection._rfe/RFE/predict_proba": { 
- "target": "sklearn/sklearn.feature_selection._rfe/RFE/predict_proba" - }, - "sklearn/sklearn.feature_selection._rfe/RFE/score": { - "target": "sklearn/sklearn.feature_selection._rfe/RFE/score" - }, - "sklearn/sklearn.feature_selection._rfe/RFECV/__init__": { - "target": "sklearn/sklearn.feature_selection._rfe/RFECV/__init__" - }, - "sklearn/sklearn.feature_selection._rfe/RFECV/fit": { - "target": "sklearn/sklearn.feature_selection._rfe/RFECV/fit" - }, "sklearn/sklearn.feature_selection._rfe/RFECV/grid_scores_@getter": { "target": "sklearn/sklearn.feature_selection._rfe/RFECV/grid_scores_@getter" }, @@ -5970,9 +8624,6 @@ "sklearn/sklearn.feature_selection._rfe/_rfe_single_fit": { "target": "sklearn/sklearn.feature_selection._rfe/_rfe_single_fit" }, - "sklearn/sklearn.feature_selection._sequential/SequentialFeatureSelector/__init__": { - "target": "sklearn/sklearn.feature_selection._sequential/SequentialFeatureSelector/__init__" - }, "sklearn/sklearn.feature_selection._sequential/SequentialFeatureSelector/_get_best_new_feature_score": { "target": "sklearn/sklearn.feature_selection._sequential/SequentialFeatureSelector/_get_best_new_feature_score" }, @@ -5982,12 +8633,6 @@ "sklearn/sklearn.feature_selection._sequential/SequentialFeatureSelector/_more_tags": { "target": "sklearn/sklearn.feature_selection._sequential/SequentialFeatureSelector/_more_tags" }, - "sklearn/sklearn.feature_selection._sequential/SequentialFeatureSelector/fit": { - "target": "sklearn/sklearn.feature_selection._sequential/SequentialFeatureSelector/fit" - }, - "sklearn/sklearn.feature_selection._univariate_selection/GenericUnivariateSelect/__init__": { - "target": "sklearn/sklearn.feature_selection._univariate_selection/GenericUnivariateSelect/__init__" - }, "sklearn/sklearn.feature_selection._univariate_selection/GenericUnivariateSelect/_check_params": { "target": "sklearn/sklearn.feature_selection._univariate_selection/GenericUnivariateSelect/_check_params" }, @@ -6000,36 +8645,21 @@ "sklearn/sklearn.feature_selection._univariate_selection/GenericUnivariateSelect/_more_tags": { "target": "sklearn/sklearn.feature_selection._univariate_selection/GenericUnivariateSelect/_more_tags" }, - "sklearn/sklearn.feature_selection._univariate_selection/SelectFdr/__init__": { - "target": "sklearn/sklearn.feature_selection._univariate_selection/SelectFdr/__init__" - }, "sklearn/sklearn.feature_selection._univariate_selection/SelectFdr/_get_support_mask": { "target": "sklearn/sklearn.feature_selection._univariate_selection/SelectFdr/_get_support_mask" }, - "sklearn/sklearn.feature_selection._univariate_selection/SelectFpr/__init__": { - "target": "sklearn/sklearn.feature_selection._univariate_selection/SelectFpr/__init__" - }, "sklearn/sklearn.feature_selection._univariate_selection/SelectFpr/_get_support_mask": { "target": "sklearn/sklearn.feature_selection._univariate_selection/SelectFpr/_get_support_mask" }, - "sklearn/sklearn.feature_selection._univariate_selection/SelectFwe/__init__": { - "target": "sklearn/sklearn.feature_selection._univariate_selection/SelectFwe/__init__" - }, "sklearn/sklearn.feature_selection._univariate_selection/SelectFwe/_get_support_mask": { "target": "sklearn/sklearn.feature_selection._univariate_selection/SelectFwe/_get_support_mask" }, - "sklearn/sklearn.feature_selection._univariate_selection/SelectKBest/__init__": { - "target": "sklearn/sklearn.feature_selection._univariate_selection/SelectKBest/__init__" - }, "sklearn/sklearn.feature_selection._univariate_selection/SelectKBest/_check_params": { 
"target": "sklearn/sklearn.feature_selection._univariate_selection/SelectKBest/_check_params" }, "sklearn/sklearn.feature_selection._univariate_selection/SelectKBest/_get_support_mask": { "target": "sklearn/sklearn.feature_selection._univariate_selection/SelectKBest/_get_support_mask" }, - "sklearn/sklearn.feature_selection._univariate_selection/SelectPercentile/__init__": { - "target": "sklearn/sklearn.feature_selection._univariate_selection/SelectPercentile/__init__" - }, "sklearn/sklearn.feature_selection._univariate_selection/SelectPercentile/_check_params": { "target": "sklearn/sklearn.feature_selection._univariate_selection/SelectPercentile/_check_params" }, @@ -6045,9 +8675,6 @@ "sklearn/sklearn.feature_selection._univariate_selection/_BaseFilter/_more_tags": { "target": "sklearn/sklearn.feature_selection._univariate_selection/_BaseFilter/_more_tags" }, - "sklearn/sklearn.feature_selection._univariate_selection/_BaseFilter/fit": { - "target": "sklearn/sklearn.feature_selection._univariate_selection/_BaseFilter/fit" - }, "sklearn/sklearn.feature_selection._univariate_selection/_chisquare": { "target": "sklearn/sklearn.feature_selection._univariate_selection/_chisquare" }, @@ -6060,33 +8687,18 @@ "sklearn/sklearn.feature_selection._univariate_selection/r_regression": { "target": "sklearn/sklearn.feature_selection._univariate_selection/r_regression" }, - "sklearn/sklearn.feature_selection._variance_threshold/VarianceThreshold/__init__": { - "target": "sklearn/sklearn.feature_selection._variance_threshold/VarianceThreshold/__init__" - }, "sklearn/sklearn.feature_selection._variance_threshold/VarianceThreshold/_get_support_mask": { "target": "sklearn/sklearn.feature_selection._variance_threshold/VarianceThreshold/_get_support_mask" }, "sklearn/sklearn.feature_selection._variance_threshold/VarianceThreshold/_more_tags": { "target": "sklearn/sklearn.feature_selection._variance_threshold/VarianceThreshold/_more_tags" }, - "sklearn/sklearn.feature_selection._variance_threshold/VarianceThreshold/fit": { - "target": "sklearn/sklearn.feature_selection._variance_threshold/VarianceThreshold/fit" - }, - "sklearn/sklearn.gaussian_process._gpc/GaussianProcessClassifier/__init__": { - "target": "sklearn/sklearn.gaussian_process._gpc/GaussianProcessClassifier/__init__" - }, - "sklearn/sklearn.gaussian_process._gpc/GaussianProcessClassifier/fit": { - "target": "sklearn/sklearn.gaussian_process._gpc/GaussianProcessClassifier/fit" - }, "sklearn/sklearn.gaussian_process._gpc/GaussianProcessClassifier/kernel_@getter": { "target": "sklearn/sklearn.gaussian_process._gpc/GaussianProcessClassifier/kernel_@getter" }, "sklearn/sklearn.gaussian_process._gpc/GaussianProcessClassifier/log_marginal_likelihood": { "target": "sklearn/sklearn.gaussian_process._gpc/GaussianProcessClassifier/log_marginal_likelihood" }, - "sklearn/sklearn.gaussian_process._gpc/GaussianProcessClassifier/predict": { - "target": "sklearn/sklearn.gaussian_process._gpc/GaussianProcessClassifier/predict" - }, "sklearn/sklearn.gaussian_process._gpc/GaussianProcessClassifier/predict_proba": { "target": "sklearn/sklearn.gaussian_process._gpc/GaussianProcessClassifier/predict_proba" }, @@ -6111,24 +8723,12 @@ "sklearn/sklearn.gaussian_process._gpc/_BinaryGaussianProcessClassifierLaplace/predict_proba": { "target": "sklearn/sklearn.gaussian_process._gpc/_BinaryGaussianProcessClassifierLaplace/predict_proba" }, - "sklearn/sklearn.gaussian_process._gpr/GaussianProcessRegressor/__init__": { - "target": 
"sklearn/sklearn.gaussian_process._gpr/GaussianProcessRegressor/__init__" - }, "sklearn/sklearn.gaussian_process._gpr/GaussianProcessRegressor/_constrained_optimization": { "target": "sklearn/sklearn.gaussian_process._gpr/GaussianProcessRegressor/_constrained_optimization" }, "sklearn/sklearn.gaussian_process._gpr/GaussianProcessRegressor/_more_tags": { "target": "sklearn/sklearn.gaussian_process._gpr/GaussianProcessRegressor/_more_tags" }, - "sklearn/sklearn.gaussian_process._gpr/GaussianProcessRegressor/fit": { - "target": "sklearn/sklearn.gaussian_process._gpr/GaussianProcessRegressor/fit" - }, - "sklearn/sklearn.gaussian_process._gpr/GaussianProcessRegressor/log_marginal_likelihood": { - "target": "sklearn/sklearn.gaussian_process._gpr/GaussianProcessRegressor/log_marginal_likelihood" - }, - "sklearn/sklearn.gaussian_process._gpr/GaussianProcessRegressor/predict": { - "target": "sklearn/sklearn.gaussian_process._gpr/GaussianProcessRegressor/predict" - }, "sklearn/sklearn.gaussian_process._gpr/GaussianProcessRegressor/sample_y": { "target": "sklearn/sklearn.gaussian_process._gpr/GaussianProcessRegressor/sample_y" }, @@ -6426,9 +9026,6 @@ "sklearn/sklearn.gaussian_process.kernels/_check_length_scale": { "target": "sklearn/sklearn.gaussian_process.kernels/_check_length_scale" }, - "sklearn/sklearn.impute._base/MissingIndicator/__init__": { - "target": "sklearn/sklearn.impute._base/MissingIndicator/__init__" - }, "sklearn/sklearn.impute._base/MissingIndicator/_fit": { "target": "sklearn/sklearn.impute._base/MissingIndicator/_fit" }, @@ -6441,21 +9038,9 @@ "sklearn/sklearn.impute._base/MissingIndicator/_validate_input": { "target": "sklearn/sklearn.impute._base/MissingIndicator/_validate_input" }, - "sklearn/sklearn.impute._base/MissingIndicator/fit": { - "target": "sklearn/sklearn.impute._base/MissingIndicator/fit" - }, - "sklearn/sklearn.impute._base/MissingIndicator/fit_transform": { - "target": "sklearn/sklearn.impute._base/MissingIndicator/fit_transform" - }, "sklearn/sklearn.impute._base/MissingIndicator/get_feature_names_out": { "target": "sklearn/sklearn.impute._base/MissingIndicator/get_feature_names_out" }, - "sklearn/sklearn.impute._base/MissingIndicator/transform": { - "target": "sklearn/sklearn.impute._base/MissingIndicator/transform" - }, - "sklearn/sklearn.impute._base/SimpleImputer/__init__": { - "target": "sklearn/sklearn.impute._base/SimpleImputer/__init__" - }, "sklearn/sklearn.impute._base/SimpleImputer/_dense_fit": { "target": "sklearn/sklearn.impute._base/SimpleImputer/_dense_fit" }, @@ -6468,18 +9053,12 @@ "sklearn/sklearn.impute._base/SimpleImputer/_validate_input": { "target": "sklearn/sklearn.impute._base/SimpleImputer/_validate_input" }, - "sklearn/sklearn.impute._base/SimpleImputer/fit": { - "target": "sklearn/sklearn.impute._base/SimpleImputer/fit" - }, "sklearn/sklearn.impute._base/SimpleImputer/get_feature_names_out": { "target": "sklearn/sklearn.impute._base/SimpleImputer/get_feature_names_out" }, "sklearn/sklearn.impute._base/SimpleImputer/inverse_transform": { "target": "sklearn/sklearn.impute._base/SimpleImputer/inverse_transform" }, - "sklearn/sklearn.impute._base/SimpleImputer/transform": { - "target": "sklearn/sklearn.impute._base/SimpleImputer/transform" - }, "sklearn/sklearn.impute._base/_BaseImputer/__init__": { "target": "sklearn/sklearn.impute._base/_BaseImputer/__init__" }, @@ -6504,9 +9083,6 @@ "sklearn/sklearn.impute._base/_most_frequent": { "target": "sklearn/sklearn.impute._base/_most_frequent" }, - 
"sklearn/sklearn.impute._iterative/IterativeImputer/__init__": { - "target": "sklearn/sklearn.impute._iterative/IterativeImputer/__init__" - }, "sklearn/sklearn.impute._iterative/IterativeImputer/_get_abs_corr_mat": { "target": "sklearn/sklearn.impute._iterative/IterativeImputer/_get_abs_corr_mat" }, @@ -6525,33 +9101,15 @@ "sklearn/sklearn.impute._iterative/IterativeImputer/_validate_limit": { "target": "sklearn/sklearn.impute._iterative/IterativeImputer/_validate_limit" }, - "sklearn/sklearn.impute._iterative/IterativeImputer/fit": { - "target": "sklearn/sklearn.impute._iterative/IterativeImputer/fit" - }, - "sklearn/sklearn.impute._iterative/IterativeImputer/fit_transform": { - "target": "sklearn/sklearn.impute._iterative/IterativeImputer/fit_transform" - }, "sklearn/sklearn.impute._iterative/IterativeImputer/get_feature_names_out": { "target": "sklearn/sklearn.impute._iterative/IterativeImputer/get_feature_names_out" }, - "sklearn/sklearn.impute._iterative/IterativeImputer/transform": { - "target": "sklearn/sklearn.impute._iterative/IterativeImputer/transform" - }, - "sklearn/sklearn.impute._knn/KNNImputer/__init__": { - "target": "sklearn/sklearn.impute._knn/KNNImputer/__init__" - }, "sklearn/sklearn.impute._knn/KNNImputer/_calc_impute": { "target": "sklearn/sklearn.impute._knn/KNNImputer/_calc_impute" }, - "sklearn/sklearn.impute._knn/KNNImputer/fit": { - "target": "sklearn/sklearn.impute._knn/KNNImputer/fit" - }, "sklearn/sklearn.impute._knn/KNNImputer/get_feature_names_out": { "target": "sklearn/sklearn.impute._knn/KNNImputer/get_feature_names_out" }, - "sklearn/sklearn.impute._knn/KNNImputer/transform": { - "target": "sklearn/sklearn.impute._knn/KNNImputer/transform" - }, "sklearn/sklearn.impute/__getattr__": { "target": "sklearn/sklearn.impute/__getattr__" }, @@ -6702,12 +9260,6 @@ "sklearn/sklearn.linear_model._base/LinearClassifierMixin/_predict_proba_lr": { "target": "sklearn/sklearn.linear_model._base/LinearClassifierMixin/_predict_proba_lr" }, - "sklearn/sklearn.linear_model._base/LinearClassifierMixin/decision_function": { - "target": "sklearn/sklearn.linear_model._base/LinearClassifierMixin/decision_function" - }, - "sklearn/sklearn.linear_model._base/LinearClassifierMixin/predict": { - "target": "sklearn/sklearn.linear_model._base/LinearClassifierMixin/predict" - }, "sklearn/sklearn.linear_model._base/LinearModel/_decision_function": { "target": "sklearn/sklearn.linear_model._base/LinearModel/_decision_function" }, @@ -6720,21 +9272,9 @@ "sklearn/sklearn.linear_model._base/LinearModel/fit": { "target": "sklearn/sklearn.linear_model._base/LinearModel/fit" }, - "sklearn/sklearn.linear_model._base/LinearModel/predict": { - "target": "sklearn/sklearn.linear_model._base/LinearModel/predict" - }, - "sklearn/sklearn.linear_model._base/LinearRegression/__init__": { - "target": "sklearn/sklearn.linear_model._base/LinearRegression/__init__" - }, - "sklearn/sklearn.linear_model._base/LinearRegression/fit": { - "target": "sklearn/sklearn.linear_model._base/LinearRegression/fit" - }, "sklearn/sklearn.linear_model._base/SparseCoefMixin/densify": { "target": "sklearn/sklearn.linear_model._base/SparseCoefMixin/densify" }, - "sklearn/sklearn.linear_model._base/SparseCoefMixin/sparsify": { - "target": "sklearn/sklearn.linear_model._base/SparseCoefMixin/sparsify" - }, "sklearn/sklearn.linear_model._base/_check_precomputed_gram_matrix": { "target": "sklearn/sklearn.linear_model._base/_check_precomputed_gram_matrix" }, @@ -6753,54 +9293,24 @@ "sklearn/sklearn.linear_model._base/make_dataset": 
{ "target": "sklearn/sklearn.linear_model._base/make_dataset" }, - "sklearn/sklearn.linear_model._bayes/ARDRegression/__init__": { - "target": "sklearn/sklearn.linear_model._bayes/ARDRegression/__init__" - }, "sklearn/sklearn.linear_model._bayes/ARDRegression/_update_sigma": { "target": "sklearn/sklearn.linear_model._bayes/ARDRegression/_update_sigma" }, "sklearn/sklearn.linear_model._bayes/ARDRegression/_update_sigma_woodbury": { "target": "sklearn/sklearn.linear_model._bayes/ARDRegression/_update_sigma_woodbury" }, - "sklearn/sklearn.linear_model._bayes/ARDRegression/fit": { - "target": "sklearn/sklearn.linear_model._bayes/ARDRegression/fit" - }, - "sklearn/sklearn.linear_model._bayes/ARDRegression/predict": { - "target": "sklearn/sklearn.linear_model._bayes/ARDRegression/predict" - }, - "sklearn/sklearn.linear_model._bayes/BayesianRidge/__init__": { - "target": "sklearn/sklearn.linear_model._bayes/BayesianRidge/__init__" - }, - "sklearn/sklearn.linear_model._bayes/BayesianRidge/_check_params": { - "target": "sklearn/sklearn.linear_model._bayes/BayesianRidge/_check_params" - }, "sklearn/sklearn.linear_model._bayes/BayesianRidge/_log_marginal_likelihood": { "target": "sklearn/sklearn.linear_model._bayes/BayesianRidge/_log_marginal_likelihood" }, "sklearn/sklearn.linear_model._bayes/BayesianRidge/_update_coef_": { "target": "sklearn/sklearn.linear_model._bayes/BayesianRidge/_update_coef_" }, - "sklearn/sklearn.linear_model._bayes/BayesianRidge/fit": { - "target": "sklearn/sklearn.linear_model._bayes/BayesianRidge/fit" - }, - "sklearn/sklearn.linear_model._bayes/BayesianRidge/predict": { - "target": "sklearn/sklearn.linear_model._bayes/BayesianRidge/predict" - }, - "sklearn/sklearn.linear_model._coordinate_descent/ElasticNet/__init__": { - "target": "sklearn/sklearn.linear_model._coordinate_descent/ElasticNet/__init__" - }, "sklearn/sklearn.linear_model._coordinate_descent/ElasticNet/_decision_function": { "target": "sklearn/sklearn.linear_model._coordinate_descent/ElasticNet/_decision_function" }, - "sklearn/sklearn.linear_model._coordinate_descent/ElasticNet/fit": { - "target": "sklearn/sklearn.linear_model._coordinate_descent/ElasticNet/fit" - }, "sklearn/sklearn.linear_model._coordinate_descent/ElasticNet/sparse_coef_@getter": { "target": "sklearn/sklearn.linear_model._coordinate_descent/ElasticNet/sparse_coef_@getter" }, - "sklearn/sklearn.linear_model._coordinate_descent/ElasticNetCV/__init__": { - "target": "sklearn/sklearn.linear_model._coordinate_descent/ElasticNetCV/__init__" - }, "sklearn/sklearn.linear_model._coordinate_descent/ElasticNetCV/_get_estimator": { "target": "sklearn/sklearn.linear_model._coordinate_descent/ElasticNetCV/_get_estimator" }, @@ -6810,12 +9320,6 @@ "sklearn/sklearn.linear_model._coordinate_descent/ElasticNetCV/_more_tags": { "target": "sklearn/sklearn.linear_model._coordinate_descent/ElasticNetCV/_more_tags" }, - "sklearn/sklearn.linear_model._coordinate_descent/Lasso/__init__": { - "target": "sklearn/sklearn.linear_model._coordinate_descent/Lasso/__init__" - }, - "sklearn/sklearn.linear_model._coordinate_descent/LassoCV/__init__": { - "target": "sklearn/sklearn.linear_model._coordinate_descent/LassoCV/__init__" - }, "sklearn/sklearn.linear_model._coordinate_descent/LassoCV/_get_estimator": { "target": "sklearn/sklearn.linear_model._coordinate_descent/LassoCV/_get_estimator" }, @@ -6837,24 +9341,12 @@ "sklearn/sklearn.linear_model._coordinate_descent/LinearModelCV/_more_tags": { "target": 
"sklearn/sklearn.linear_model._coordinate_descent/LinearModelCV/_more_tags" }, - "sklearn/sklearn.linear_model._coordinate_descent/LinearModelCV/fit": { - "target": "sklearn/sklearn.linear_model._coordinate_descent/LinearModelCV/fit" - }, "sklearn/sklearn.linear_model._coordinate_descent/LinearModelCV/path": { "target": "sklearn/sklearn.linear_model._coordinate_descent/LinearModelCV/path" }, - "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNet/__init__": { - "target": "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNet/__init__" - }, "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNet/_more_tags": { "target": "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNet/_more_tags" }, - "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNet/fit": { - "target": "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNet/fit" - }, - "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNetCV/__init__": { - "target": "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNetCV/__init__" - }, "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNetCV/_get_estimator": { "target": "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNetCV/_get_estimator" }, @@ -6900,21 +9392,12 @@ "sklearn/sklearn.linear_model._coordinate_descent/lasso_path": { "target": "sklearn/sklearn.linear_model._coordinate_descent/lasso_path" }, - "sklearn/sklearn.linear_model._glm.glm/GammaRegressor/__init__": { - "target": "sklearn/sklearn.linear_model._glm.glm/GammaRegressor/__init__" - }, "sklearn/sklearn.linear_model._glm.glm/GammaRegressor/_get_loss": { "target": "sklearn/sklearn.linear_model._glm.glm/GammaRegressor/_get_loss" }, - "sklearn/sklearn.linear_model._glm.glm/PoissonRegressor/__init__": { - "target": "sklearn/sklearn.linear_model._glm.glm/PoissonRegressor/__init__" - }, "sklearn/sklearn.linear_model._glm.glm/PoissonRegressor/_get_loss": { "target": "sklearn/sklearn.linear_model._glm.glm/PoissonRegressor/_get_loss" }, - "sklearn/sklearn.linear_model._glm.glm/TweedieRegressor/__init__": { - "target": "sklearn/sklearn.linear_model._glm.glm/TweedieRegressor/__init__" - }, "sklearn/sklearn.linear_model._glm.glm/TweedieRegressor/_get_loss": { "target": "sklearn/sklearn.linear_model._glm.glm/TweedieRegressor/_get_loss" }, @@ -6942,54 +9425,24 @@ "sklearn/sklearn.linear_model._glm.glm/_GeneralizedLinearRegressor/score": { "target": "sklearn/sklearn.linear_model._glm.glm/_GeneralizedLinearRegressor/score" }, - "sklearn/sklearn.linear_model._huber/HuberRegressor/__init__": { - "target": "sklearn/sklearn.linear_model._huber/HuberRegressor/__init__" - }, - "sklearn/sklearn.linear_model._huber/HuberRegressor/fit": { - "target": "sklearn/sklearn.linear_model._huber/HuberRegressor/fit" - }, "sklearn/sklearn.linear_model._huber/_huber_loss_and_gradient": { "target": "sklearn/sklearn.linear_model._huber/_huber_loss_and_gradient" }, - "sklearn/sklearn.linear_model._least_angle/Lars/__init__": { - "target": "sklearn/sklearn.linear_model._least_angle/Lars/__init__" - }, "sklearn/sklearn.linear_model._least_angle/Lars/_fit": { "target": "sklearn/sklearn.linear_model._least_angle/Lars/_fit" }, "sklearn/sklearn.linear_model._least_angle/Lars/_get_gram": { "target": "sklearn/sklearn.linear_model._least_angle/Lars/_get_gram" }, - "sklearn/sklearn.linear_model._least_angle/Lars/fit": { - "target": "sklearn/sklearn.linear_model._least_angle/Lars/fit" - }, - 
"sklearn/sklearn.linear_model._least_angle/LarsCV/__init__": { - "target": "sklearn/sklearn.linear_model._least_angle/LarsCV/__init__" - }, "sklearn/sklearn.linear_model._least_angle/LarsCV/_more_tags": { "target": "sklearn/sklearn.linear_model._least_angle/LarsCV/_more_tags" }, - "sklearn/sklearn.linear_model._least_angle/LarsCV/fit": { - "target": "sklearn/sklearn.linear_model._least_angle/LarsCV/fit" - }, - "sklearn/sklearn.linear_model._least_angle/LassoLars/__init__": { - "target": "sklearn/sklearn.linear_model._least_angle/LassoLars/__init__" - }, - "sklearn/sklearn.linear_model._least_angle/LassoLarsCV/__init__": { - "target": "sklearn/sklearn.linear_model._least_angle/LassoLarsCV/__init__" - }, - "sklearn/sklearn.linear_model._least_angle/LassoLarsIC/__init__": { - "target": "sklearn/sklearn.linear_model._least_angle/LassoLarsIC/__init__" - }, "sklearn/sklearn.linear_model._least_angle/LassoLarsIC/_estimate_noise_variance": { "target": "sklearn/sklearn.linear_model._least_angle/LassoLarsIC/_estimate_noise_variance" }, "sklearn/sklearn.linear_model._least_angle/LassoLarsIC/_more_tags": { "target": "sklearn/sklearn.linear_model._least_angle/LassoLarsIC/_more_tags" }, - "sklearn/sklearn.linear_model._least_angle/LassoLarsIC/fit": { - "target": "sklearn/sklearn.linear_model._least_angle/LassoLarsIC/fit" - }, "sklearn/sklearn.linear_model._least_angle/_check_copy_and_writeable": { "target": "sklearn/sklearn.linear_model._least_angle/_check_copy_and_writeable" }, @@ -7023,30 +9476,9 @@ "sklearn/sklearn.linear_model._linear_loss/LinearModelLoss/loss_gradient": { "target": "sklearn/sklearn.linear_model._linear_loss/LinearModelLoss/loss_gradient" }, - "sklearn/sklearn.linear_model._logistic/LogisticRegression/__init__": { - "target": "sklearn/sklearn.linear_model._logistic/LogisticRegression/__init__" - }, - "sklearn/sklearn.linear_model._logistic/LogisticRegression/fit": { - "target": "sklearn/sklearn.linear_model._logistic/LogisticRegression/fit" - }, - "sklearn/sklearn.linear_model._logistic/LogisticRegression/predict_log_proba": { - "target": "sklearn/sklearn.linear_model._logistic/LogisticRegression/predict_log_proba" - }, - "sklearn/sklearn.linear_model._logistic/LogisticRegression/predict_proba": { - "target": "sklearn/sklearn.linear_model._logistic/LogisticRegression/predict_proba" - }, - "sklearn/sklearn.linear_model._logistic/LogisticRegressionCV/__init__": { - "target": "sklearn/sklearn.linear_model._logistic/LogisticRegressionCV/__init__" - }, "sklearn/sklearn.linear_model._logistic/LogisticRegressionCV/_more_tags": { "target": "sklearn/sklearn.linear_model._logistic/LogisticRegressionCV/_more_tags" }, - "sklearn/sklearn.linear_model._logistic/LogisticRegressionCV/fit": { - "target": "sklearn/sklearn.linear_model._logistic/LogisticRegressionCV/fit" - }, - "sklearn/sklearn.linear_model._logistic/LogisticRegressionCV/score": { - "target": "sklearn/sklearn.linear_model._logistic/LogisticRegressionCV/score" - }, "sklearn/sklearn.linear_model._logistic/_check_multi_class": { "target": "sklearn/sklearn.linear_model._logistic/_check_multi_class" }, @@ -7059,15 +9491,6 @@ "sklearn/sklearn.linear_model._logistic/_logistic_regression_path": { "target": "sklearn/sklearn.linear_model._logistic/_logistic_regression_path" }, - "sklearn/sklearn.linear_model._omp/OrthogonalMatchingPursuit/__init__": { - "target": "sklearn/sklearn.linear_model._omp/OrthogonalMatchingPursuit/__init__" - }, - "sklearn/sklearn.linear_model._omp/OrthogonalMatchingPursuit/fit": { - "target": 
"sklearn/sklearn.linear_model._omp/OrthogonalMatchingPursuit/fit" - }, - "sklearn/sklearn.linear_model._omp/OrthogonalMatchingPursuitCV/__init__": { - "target": "sklearn/sklearn.linear_model._omp/OrthogonalMatchingPursuitCV/__init__" - }, "sklearn/sklearn.linear_model._omp/OrthogonalMatchingPursuitCV/fit": { "target": "sklearn/sklearn.linear_model._omp/OrthogonalMatchingPursuitCV/fit" }, @@ -7086,84 +9509,33 @@ "sklearn/sklearn.linear_model._omp/orthogonal_mp_gram": { "target": "sklearn/sklearn.linear_model._omp/orthogonal_mp_gram" }, - "sklearn/sklearn.linear_model._passive_aggressive/PassiveAggressiveClassifier/__init__": { - "target": "sklearn/sklearn.linear_model._passive_aggressive/PassiveAggressiveClassifier/__init__" - }, - "sklearn/sklearn.linear_model._passive_aggressive/PassiveAggressiveClassifier/fit": { - "target": "sklearn/sklearn.linear_model._passive_aggressive/PassiveAggressiveClassifier/fit" - }, "sklearn/sklearn.linear_model._passive_aggressive/PassiveAggressiveClassifier/partial_fit": { "target": "sklearn/sklearn.linear_model._passive_aggressive/PassiveAggressiveClassifier/partial_fit" }, - "sklearn/sklearn.linear_model._passive_aggressive/PassiveAggressiveRegressor/__init__": { - "target": "sklearn/sklearn.linear_model._passive_aggressive/PassiveAggressiveRegressor/__init__" - }, - "sklearn/sklearn.linear_model._passive_aggressive/PassiveAggressiveRegressor/fit": { - "target": "sklearn/sklearn.linear_model._passive_aggressive/PassiveAggressiveRegressor/fit" - }, "sklearn/sklearn.linear_model._passive_aggressive/PassiveAggressiveRegressor/partial_fit": { "target": "sklearn/sklearn.linear_model._passive_aggressive/PassiveAggressiveRegressor/partial_fit" }, - "sklearn/sklearn.linear_model._perceptron/Perceptron/__init__": { - "target": "sklearn/sklearn.linear_model._perceptron/Perceptron/__init__" - }, "sklearn/sklearn.linear_model._quantile/QuantileRegressor/__init__": { "target": "sklearn/sklearn.linear_model._quantile/QuantileRegressor/__init__" }, "sklearn/sklearn.linear_model._quantile/QuantileRegressor/fit": { "target": "sklearn/sklearn.linear_model._quantile/QuantileRegressor/fit" }, - "sklearn/sklearn.linear_model._ransac/RANSACRegressor/__init__": { - "target": "sklearn/sklearn.linear_model._ransac/RANSACRegressor/__init__" - }, "sklearn/sklearn.linear_model._ransac/RANSACRegressor/_more_tags": { "target": "sklearn/sklearn.linear_model._ransac/RANSACRegressor/_more_tags" }, - "sklearn/sklearn.linear_model._ransac/RANSACRegressor/fit": { - "target": "sklearn/sklearn.linear_model._ransac/RANSACRegressor/fit" - }, - "sklearn/sklearn.linear_model._ransac/RANSACRegressor/predict": { - "target": "sklearn/sklearn.linear_model._ransac/RANSACRegressor/predict" - }, - "sklearn/sklearn.linear_model._ransac/RANSACRegressor/score": { - "target": "sklearn/sklearn.linear_model._ransac/RANSACRegressor/score" - }, "sklearn/sklearn.linear_model._ransac/_dynamic_max_trials": { "target": "sklearn/sklearn.linear_model._ransac/_dynamic_max_trials" }, - "sklearn/sklearn.linear_model._ridge/Ridge/__init__": { - "target": "sklearn/sklearn.linear_model._ridge/Ridge/__init__" - }, - "sklearn/sklearn.linear_model._ridge/Ridge/fit": { - "target": "sklearn/sklearn.linear_model._ridge/Ridge/fit" - }, - "sklearn/sklearn.linear_model._ridge/RidgeClassifier/__init__": { - "target": "sklearn/sklearn.linear_model._ridge/RidgeClassifier/__init__" - }, - "sklearn/sklearn.linear_model._ridge/RidgeClassifier/fit": { - "target": "sklearn/sklearn.linear_model._ridge/RidgeClassifier/fit" - }, - 
"sklearn/sklearn.linear_model._ridge/RidgeClassifierCV/__init__": { - "target": "sklearn/sklearn.linear_model._ridge/RidgeClassifierCV/__init__" - }, "sklearn/sklearn.linear_model._ridge/RidgeClassifierCV/_more_tags": { "target": "sklearn/sklearn.linear_model._ridge/RidgeClassifierCV/_more_tags" }, - "sklearn/sklearn.linear_model._ridge/RidgeClassifierCV/fit": { - "target": "sklearn/sklearn.linear_model._ridge/RidgeClassifierCV/fit" - }, "sklearn/sklearn.linear_model._ridge/_BaseRidge/__init__": { "target": "sklearn/sklearn.linear_model._ridge/_BaseRidge/__init__" }, "sklearn/sklearn.linear_model._ridge/_BaseRidge/fit": { "target": "sklearn/sklearn.linear_model._ridge/_BaseRidge/fit" }, - "sklearn/sklearn.linear_model._ridge/_BaseRidgeCV/__init__": { - "target": "sklearn/sklearn.linear_model._ridge/_BaseRidgeCV/__init__" - }, - "sklearn/sklearn.linear_model._ridge/_BaseRidgeCV/fit": { - "target": "sklearn/sklearn.linear_model._ridge/_BaseRidgeCV/fit" - }, "sklearn/sklearn.linear_model._ridge/_IdentityClassifier/__init__": { "target": "sklearn/sklearn.linear_model._ridge/_IdentityClassifier/__init__" }, @@ -7185,9 +9557,6 @@ "sklearn/sklearn.linear_model._ridge/_RidgeClassifierMixin/classes_@getter": { "target": "sklearn/sklearn.linear_model._ridge/_RidgeClassifierMixin/classes_@getter" }, - "sklearn/sklearn.linear_model._ridge/_RidgeClassifierMixin/predict": { - "target": "sklearn/sklearn.linear_model._ridge/_RidgeClassifierMixin/predict" - }, "sklearn/sklearn.linear_model._ridge/_RidgeGCV/__init__": { "target": "sklearn/sklearn.linear_model._ridge/_RidgeGCV/__init__" }, @@ -7338,12 +9707,6 @@ "sklearn/sklearn.linear_model._stochastic_gradient/BaseSGDClassifier/_partial_fit": { "target": "sklearn/sklearn.linear_model._stochastic_gradient/BaseSGDClassifier/_partial_fit" }, - "sklearn/sklearn.linear_model._stochastic_gradient/BaseSGDClassifier/fit": { - "target": "sklearn/sklearn.linear_model._stochastic_gradient/BaseSGDClassifier/fit" - }, - "sklearn/sklearn.linear_model._stochastic_gradient/BaseSGDClassifier/partial_fit": { - "target": "sklearn/sklearn.linear_model._stochastic_gradient/BaseSGDClassifier/partial_fit" - }, "sklearn/sklearn.linear_model._stochastic_gradient/BaseSGDRegressor/__init__": { "target": "sklearn/sklearn.linear_model._stochastic_gradient/BaseSGDRegressor/__init__" }, @@ -7359,30 +9722,15 @@ "sklearn/sklearn.linear_model._stochastic_gradient/BaseSGDRegressor/_partial_fit": { "target": "sklearn/sklearn.linear_model._stochastic_gradient/BaseSGDRegressor/_partial_fit" }, - "sklearn/sklearn.linear_model._stochastic_gradient/BaseSGDRegressor/fit": { - "target": "sklearn/sklearn.linear_model._stochastic_gradient/BaseSGDRegressor/fit" - }, "sklearn/sklearn.linear_model._stochastic_gradient/BaseSGDRegressor/partial_fit": { "target": "sklearn/sklearn.linear_model._stochastic_gradient/BaseSGDRegressor/partial_fit" }, - "sklearn/sklearn.linear_model._stochastic_gradient/BaseSGDRegressor/predict": { - "target": "sklearn/sklearn.linear_model._stochastic_gradient/BaseSGDRegressor/predict" - }, - "sklearn/sklearn.linear_model._stochastic_gradient/SGDClassifier/__init__": { - "target": "sklearn/sklearn.linear_model._stochastic_gradient/SGDClassifier/__init__" - }, "sklearn/sklearn.linear_model._stochastic_gradient/SGDClassifier/_check_proba": { "target": "sklearn/sklearn.linear_model._stochastic_gradient/SGDClassifier/_check_proba" }, "sklearn/sklearn.linear_model._stochastic_gradient/SGDClassifier/_more_tags": { "target": 
"sklearn/sklearn.linear_model._stochastic_gradient/SGDClassifier/_more_tags" }, - "sklearn/sklearn.linear_model._stochastic_gradient/SGDClassifier/predict_log_proba": { - "target": "sklearn/sklearn.linear_model._stochastic_gradient/SGDClassifier/predict_log_proba" - }, - "sklearn/sklearn.linear_model._stochastic_gradient/SGDClassifier/predict_proba": { - "target": "sklearn/sklearn.linear_model._stochastic_gradient/SGDClassifier/predict_proba" - }, "sklearn/sklearn.linear_model._stochastic_gradient/SGDOneClassSVM/__init__": { "target": "sklearn/sklearn.linear_model._stochastic_gradient/SGDOneClassSVM/__init__" }, @@ -7416,9 +9764,6 @@ "sklearn/sklearn.linear_model._stochastic_gradient/SGDOneClassSVM/score_samples": { "target": "sklearn/sklearn.linear_model._stochastic_gradient/SGDOneClassSVM/score_samples" }, - "sklearn/sklearn.linear_model._stochastic_gradient/SGDRegressor/__init__": { - "target": "sklearn/sklearn.linear_model._stochastic_gradient/SGDRegressor/__init__" - }, "sklearn/sklearn.linear_model._stochastic_gradient/SGDRegressor/_more_tags": { "target": "sklearn/sklearn.linear_model._stochastic_gradient/SGDRegressor/_more_tags" }, @@ -7434,15 +9779,9 @@ "sklearn/sklearn.linear_model._stochastic_gradient/fit_binary": { "target": "sklearn/sklearn.linear_model._stochastic_gradient/fit_binary" }, - "sklearn/sklearn.linear_model._theil_sen/TheilSenRegressor/__init__": { - "target": "sklearn/sklearn.linear_model._theil_sen/TheilSenRegressor/__init__" - }, "sklearn/sklearn.linear_model._theil_sen/TheilSenRegressor/_check_subparams": { "target": "sklearn/sklearn.linear_model._theil_sen/TheilSenRegressor/_check_subparams" }, - "sklearn/sklearn.linear_model._theil_sen/TheilSenRegressor/fit": { - "target": "sklearn/sklearn.linear_model._theil_sen/TheilSenRegressor/fit" - }, "sklearn/sklearn.linear_model._theil_sen/_breakdown_point": { "target": "sklearn/sklearn.linear_model._theil_sen/_breakdown_point" }, @@ -7458,39 +9797,18 @@ "sklearn/sklearn.linear_model.setup/configuration": { "target": "sklearn/sklearn.linear_model.setup/configuration" }, - "sklearn/sklearn.manifold._isomap/Isomap/__init__": { - "target": "sklearn/sklearn.manifold._isomap/Isomap/__init__" - }, "sklearn/sklearn.manifold._isomap/Isomap/_fit_transform": { "target": "sklearn/sklearn.manifold._isomap/Isomap/_fit_transform" }, - "sklearn/sklearn.manifold._isomap/Isomap/fit": { - "target": "sklearn/sklearn.manifold._isomap/Isomap/fit" - }, - "sklearn/sklearn.manifold._isomap/Isomap/fit_transform": { - "target": "sklearn/sklearn.manifold._isomap/Isomap/fit_transform" - }, "sklearn/sklearn.manifold._isomap/Isomap/reconstruction_error": { "target": "sklearn/sklearn.manifold._isomap/Isomap/reconstruction_error" }, - "sklearn/sklearn.manifold._isomap/Isomap/transform": { - "target": "sklearn/sklearn.manifold._isomap/Isomap/transform" - }, - "sklearn/sklearn.manifold._locally_linear/LocallyLinearEmbedding/__init__": { - "target": "sklearn/sklearn.manifold._locally_linear/LocallyLinearEmbedding/__init__" - }, "sklearn/sklearn.manifold._locally_linear/LocallyLinearEmbedding/_fit_transform": { "target": "sklearn/sklearn.manifold._locally_linear/LocallyLinearEmbedding/_fit_transform" }, "sklearn/sklearn.manifold._locally_linear/LocallyLinearEmbedding/fit": { "target": "sklearn/sklearn.manifold._locally_linear/LocallyLinearEmbedding/fit" }, - "sklearn/sklearn.manifold._locally_linear/LocallyLinearEmbedding/fit_transform": { - "target": "sklearn/sklearn.manifold._locally_linear/LocallyLinearEmbedding/fit_transform" - }, - 
"sklearn/sklearn.manifold._locally_linear/LocallyLinearEmbedding/transform": { - "target": "sklearn/sklearn.manifold._locally_linear/LocallyLinearEmbedding/transform" - }, "sklearn/sklearn.manifold._locally_linear/barycenter_kneighbors_graph": { "target": "sklearn/sklearn.manifold._locally_linear/barycenter_kneighbors_graph" }, @@ -7503,24 +9821,15 @@ "sklearn/sklearn.manifold._locally_linear/null_space": { "target": "sklearn/sklearn.manifold._locally_linear/null_space" }, - "sklearn/sklearn.manifold._mds/MDS/__init__": { - "target": "sklearn/sklearn.manifold._mds/MDS/__init__" - }, "sklearn/sklearn.manifold._mds/MDS/_more_tags": { "target": "sklearn/sklearn.manifold._mds/MDS/_more_tags" }, "sklearn/sklearn.manifold._mds/MDS/fit": { "target": "sklearn/sklearn.manifold._mds/MDS/fit" }, - "sklearn/sklearn.manifold._mds/MDS/fit_transform": { - "target": "sklearn/sklearn.manifold._mds/MDS/fit_transform" - }, "sklearn/sklearn.manifold._mds/_smacof_single": { "target": "sklearn/sklearn.manifold._mds/_smacof_single" }, - "sklearn/sklearn.manifold._spectral_embedding/SpectralEmbedding/__init__": { - "target": "sklearn/sklearn.manifold._spectral_embedding/SpectralEmbedding/__init__" - }, "sklearn/sklearn.manifold._spectral_embedding/SpectralEmbedding/_get_affinity_matrix": { "target": "sklearn/sklearn.manifold._spectral_embedding/SpectralEmbedding/_get_affinity_matrix" }, @@ -7530,9 +9839,6 @@ "sklearn/sklearn.manifold._spectral_embedding/SpectralEmbedding/fit": { "target": "sklearn/sklearn.manifold._spectral_embedding/SpectralEmbedding/fit" }, - "sklearn/sklearn.manifold._spectral_embedding/SpectralEmbedding/fit_transform": { - "target": "sklearn/sklearn.manifold._spectral_embedding/SpectralEmbedding/fit_transform" - }, "sklearn/sklearn.manifold._spectral_embedding/_graph_connected_component": { "target": "sklearn/sklearn.manifold._spectral_embedding/_graph_connected_component" }, @@ -7545,9 +9851,6 @@ "sklearn/sklearn.manifold._spectral_embedding/spectral_embedding": { "target": "sklearn/sklearn.manifold._spectral_embedding/spectral_embedding" }, - "sklearn/sklearn.manifold._t_sne/TSNE/__init__": { - "target": "sklearn/sklearn.manifold._t_sne/TSNE/__init__" - }, "sklearn/sklearn.manifold._t_sne/TSNE/_fit": { "target": "sklearn/sklearn.manifold._t_sne/TSNE/_fit" }, @@ -7560,9 +9863,6 @@ "sklearn/sklearn.manifold._t_sne/TSNE/fit": { "target": "sklearn/sklearn.manifold._t_sne/TSNE/fit" }, - "sklearn/sklearn.manifold._t_sne/TSNE/fit_transform": { - "target": "sklearn/sklearn.manifold._t_sne/TSNE/fit_transform" - }, "sklearn/sklearn.manifold._t_sne/_gradient_descent": { "target": "sklearn/sklearn.manifold._t_sne/_gradient_descent" }, @@ -7611,27 +9911,18 @@ "sklearn/sklearn.metrics._classification/_weighted_sum": { "target": "sklearn/sklearn.metrics._classification/_weighted_sum" }, - "sklearn/sklearn.metrics._classification/class_likelihood_ratios": { - "target": "sklearn/sklearn.metrics._classification/class_likelihood_ratios" - }, "sklearn/sklearn.metrics._plot.base/_check_classifier_response_method": { "target": "sklearn/sklearn.metrics._plot.base/_check_classifier_response_method" }, "sklearn/sklearn.metrics._plot.base/_get_response": { "target": "sklearn/sklearn.metrics._plot.base/_get_response" }, - "sklearn/sklearn.metrics._plot.confusion_matrix/ConfusionMatrixDisplay/__init__": { - "target": "sklearn/sklearn.metrics._plot.confusion_matrix/ConfusionMatrixDisplay/__init__" - }, "sklearn/sklearn.metrics._plot.confusion_matrix/ConfusionMatrixDisplay/from_estimator": { "target": 
"sklearn/sklearn.metrics._plot.confusion_matrix/ConfusionMatrixDisplay/from_estimator" }, "sklearn/sklearn.metrics._plot.confusion_matrix/ConfusionMatrixDisplay/from_predictions": { "target": "sklearn/sklearn.metrics._plot.confusion_matrix/ConfusionMatrixDisplay/from_predictions" }, - "sklearn/sklearn.metrics._plot.confusion_matrix/ConfusionMatrixDisplay/plot": { - "target": "sklearn/sklearn.metrics._plot.confusion_matrix/ConfusionMatrixDisplay/plot" - }, "sklearn/sklearn.metrics._plot.det_curve/DetCurveDisplay/__init__": { "target": "sklearn/sklearn.metrics._plot.det_curve/DetCurveDisplay/__init__" }, @@ -7647,30 +9938,18 @@ "sklearn/sklearn.metrics._plot.det_curve/plot_det_curve": { "target": "sklearn/sklearn.metrics._plot.det_curve/plot_det_curve" }, - "sklearn/sklearn.metrics._plot.precision_recall_curve/PrecisionRecallDisplay/__init__": { - "target": "sklearn/sklearn.metrics._plot.precision_recall_curve/PrecisionRecallDisplay/__init__" - }, "sklearn/sklearn.metrics._plot.precision_recall_curve/PrecisionRecallDisplay/from_estimator": { "target": "sklearn/sklearn.metrics._plot.precision_recall_curve/PrecisionRecallDisplay/from_estimator" }, "sklearn/sklearn.metrics._plot.precision_recall_curve/PrecisionRecallDisplay/from_predictions": { "target": "sklearn/sklearn.metrics._plot.precision_recall_curve/PrecisionRecallDisplay/from_predictions" }, - "sklearn/sklearn.metrics._plot.precision_recall_curve/PrecisionRecallDisplay/plot": { - "target": "sklearn/sklearn.metrics._plot.precision_recall_curve/PrecisionRecallDisplay/plot" - }, - "sklearn/sklearn.metrics._plot.roc_curve/RocCurveDisplay/__init__": { - "target": "sklearn/sklearn.metrics._plot.roc_curve/RocCurveDisplay/__init__" - }, "sklearn/sklearn.metrics._plot.roc_curve/RocCurveDisplay/from_estimator": { "target": "sklearn/sklearn.metrics._plot.roc_curve/RocCurveDisplay/from_estimator" }, "sklearn/sklearn.metrics._plot.roc_curve/RocCurveDisplay/from_predictions": { "target": "sklearn/sklearn.metrics._plot.roc_curve/RocCurveDisplay/from_predictions" }, - "sklearn/sklearn.metrics._plot.roc_curve/RocCurveDisplay/plot": { - "target": "sklearn/sklearn.metrics._plot.roc_curve/RocCurveDisplay/plot" - }, "sklearn/sklearn.metrics._ranking/_binary_clf_curve": { "target": "sklearn/sklearn.metrics._ranking/_binary_clf_curve" }, @@ -7794,12 +10073,6 @@ "sklearn/sklearn.metrics._scorer/get_scorer_names": { "target": "sklearn/sklearn.metrics._scorer/get_scorer_names" }, - "sklearn/sklearn.metrics._scorer/negative_likelihood_ratio": { - "target": "sklearn/sklearn.metrics._scorer/negative_likelihood_ratio" - }, - "sklearn/sklearn.metrics._scorer/positive_likelihood_ratio": { - "target": "sklearn/sklearn.metrics._scorer/positive_likelihood_ratio" - }, "sklearn/sklearn.metrics.cluster._bicluster/_check_rows_and_columns": { "target": "sklearn/sklearn.metrics.cluster._bicluster/_check_rows_and_columns" }, @@ -7935,9 +10208,6 @@ "sklearn/sklearn.mixture._base/BaseMixture/_estimate_log_weights": { "target": "sklearn/sklearn.mixture._base/BaseMixture/_estimate_log_weights" }, - "sklearn/sklearn.mixture._base/BaseMixture/_estimate_weighted_log_prob": { - "target": "sklearn/sklearn.mixture._base/BaseMixture/_estimate_weighted_log_prob" - }, "sklearn/sklearn.mixture._base/BaseMixture/_get_parameters": { "target": "sklearn/sklearn.mixture._base/BaseMixture/_get_parameters" }, @@ -7962,33 +10232,9 @@ "sklearn/sklearn.mixture._base/BaseMixture/_set_parameters": { "target": "sklearn/sklearn.mixture._base/BaseMixture/_set_parameters" }, - 
"sklearn/sklearn.mixture._base/BaseMixture/fit": { - "target": "sklearn/sklearn.mixture._base/BaseMixture/fit" - }, - "sklearn/sklearn.mixture._base/BaseMixture/fit_predict": { - "target": "sklearn/sklearn.mixture._base/BaseMixture/fit_predict" - }, - "sklearn/sklearn.mixture._base/BaseMixture/predict": { - "target": "sklearn/sklearn.mixture._base/BaseMixture/predict" - }, - "sklearn/sklearn.mixture._base/BaseMixture/predict_proba": { - "target": "sklearn/sklearn.mixture._base/BaseMixture/predict_proba" - }, - "sklearn/sklearn.mixture._base/BaseMixture/sample": { - "target": "sklearn/sklearn.mixture._base/BaseMixture/sample" - }, - "sklearn/sklearn.mixture._base/BaseMixture/score": { - "target": "sklearn/sklearn.mixture._base/BaseMixture/score" - }, - "sklearn/sklearn.mixture._base/BaseMixture/score_samples": { - "target": "sklearn/sklearn.mixture._base/BaseMixture/score_samples" - }, "sklearn/sklearn.mixture._base/_check_shape": { "target": "sklearn/sklearn.mixture._base/_check_shape" }, - "sklearn/sklearn.mixture._bayesian_mixture/BayesianGaussianMixture/__init__": { - "target": "sklearn/sklearn.mixture._bayesian_mixture/BayesianGaussianMixture/__init__" - }, "sklearn/sklearn.mixture._bayesian_mixture/BayesianGaussianMixture/_check_means_parameters": { "target": "sklearn/sklearn.mixture._bayesian_mixture/BayesianGaussianMixture/_check_means_parameters" }, @@ -8037,9 +10283,6 @@ "sklearn/sklearn.mixture._bayesian_mixture/BayesianGaussianMixture/_get_parameters": { "target": "sklearn/sklearn.mixture._bayesian_mixture/BayesianGaussianMixture/_get_parameters" }, - "sklearn/sklearn.mixture._bayesian_mixture/BayesianGaussianMixture/_initialize": { - "target": "sklearn/sklearn.mixture._bayesian_mixture/BayesianGaussianMixture/_initialize" - }, "sklearn/sklearn.mixture._bayesian_mixture/BayesianGaussianMixture/_m_step": { "target": "sklearn/sklearn.mixture._bayesian_mixture/BayesianGaussianMixture/_m_step" }, @@ -8052,9 +10295,6 @@ "sklearn/sklearn.mixture._bayesian_mixture/_log_wishart_norm": { "target": "sklearn/sklearn.mixture._bayesian_mixture/_log_wishart_norm" }, - "sklearn/sklearn.mixture._gaussian_mixture/GaussianMixture/__init__": { - "target": "sklearn/sklearn.mixture._gaussian_mixture/GaussianMixture/__init__" - }, "sklearn/sklearn.mixture._gaussian_mixture/GaussianMixture/_check_parameters": { "target": "sklearn/sklearn.mixture._gaussian_mixture/GaussianMixture/_check_parameters" }, @@ -8070,9 +10310,6 @@ "sklearn/sklearn.mixture._gaussian_mixture/GaussianMixture/_get_parameters": { "target": "sklearn/sklearn.mixture._gaussian_mixture/GaussianMixture/_get_parameters" }, - "sklearn/sklearn.mixture._gaussian_mixture/GaussianMixture/_initialize": { - "target": "sklearn/sklearn.mixture._gaussian_mixture/GaussianMixture/_initialize" - }, "sklearn/sklearn.mixture._gaussian_mixture/GaussianMixture/_m_step": { "target": "sklearn/sklearn.mixture._gaussian_mixture/GaussianMixture/_m_step" }, @@ -8082,12 +10319,6 @@ "sklearn/sklearn.mixture._gaussian_mixture/GaussianMixture/_set_parameters": { "target": "sklearn/sklearn.mixture._gaussian_mixture/GaussianMixture/_set_parameters" }, - "sklearn/sklearn.mixture._gaussian_mixture/GaussianMixture/aic": { - "target": "sklearn/sklearn.mixture._gaussian_mixture/GaussianMixture/aic" - }, - "sklearn/sklearn.mixture._gaussian_mixture/GaussianMixture/bic": { - "target": "sklearn/sklearn.mixture._gaussian_mixture/GaussianMixture/bic" - }, "sklearn/sklearn.mixture._gaussian_mixture/_check_means": { "target": 
"sklearn/sklearn.mixture._gaussian_mixture/_check_means" }, @@ -8157,35 +10388,17 @@ "sklearn/sklearn.model_selection._search/BaseSearchCV/decision_function": { "target": "sklearn/sklearn.model_selection._search/BaseSearchCV/decision_function" }, - "sklearn/sklearn.model_selection._search/BaseSearchCV/fit": { - "target": "sklearn/sklearn.model_selection._search/BaseSearchCV/fit" - }, "sklearn/sklearn.model_selection._search/BaseSearchCV/inverse_transform": { "target": "sklearn/sklearn.model_selection._search/BaseSearchCV/inverse_transform" }, "sklearn/sklearn.model_selection._search/BaseSearchCV/n_features_in_@getter": { "target": "sklearn/sklearn.model_selection._search/BaseSearchCV/n_features_in_@getter" }, - "sklearn/sklearn.model_selection._search/BaseSearchCV/predict": { - "target": "sklearn/sklearn.model_selection._search/BaseSearchCV/predict" - }, "sklearn/sklearn.model_selection._search/BaseSearchCV/predict_log_proba": { "target": "sklearn/sklearn.model_selection._search/BaseSearchCV/predict_log_proba" }, - "sklearn/sklearn.model_selection._search/BaseSearchCV/predict_proba": { - "target": "sklearn/sklearn.model_selection._search/BaseSearchCV/predict_proba" - }, - "sklearn/sklearn.model_selection._search/BaseSearchCV/score": { - "target": "sklearn/sklearn.model_selection._search/BaseSearchCV/score" - }, "sklearn/sklearn.model_selection._search/BaseSearchCV/score_samples": { - "target": "sklearn/sklearn.model_selection._search/BaseSearchCV/score_samples" - }, - "sklearn/sklearn.model_selection._search/BaseSearchCV/transform": { - "target": "sklearn/sklearn.model_selection._search/BaseSearchCV/transform" - }, - "sklearn/sklearn.model_selection._search/GridSearchCV/__init__": { - "target": "sklearn/sklearn.model_selection._search/GridSearchCV/__init__" + "target": "sklearn/sklearn.model_selection._search/BaseSearchCV/score_samples" }, "sklearn/sklearn.model_selection._search/GridSearchCV/_run_search": { "target": "sklearn/sklearn.model_selection._search/GridSearchCV/_run_search" @@ -8193,9 +10406,6 @@ "sklearn/sklearn.model_selection._search/ParameterGrid/__getitem__": { "target": "sklearn/sklearn.model_selection._search/ParameterGrid/__getitem__" }, - "sklearn/sklearn.model_selection._search/ParameterGrid/__init__": { - "target": "sklearn/sklearn.model_selection._search/ParameterGrid/__init__" - }, "sklearn/sklearn.model_selection._search/ParameterGrid/__iter__": { "target": "sklearn/sklearn.model_selection._search/ParameterGrid/__iter__" }, @@ -8214,9 +10424,6 @@ "sklearn/sklearn.model_selection._search/ParameterSampler/_is_all_lists": { "target": "sklearn/sklearn.model_selection._search/ParameterSampler/_is_all_lists" }, - "sklearn/sklearn.model_selection._search/RandomizedSearchCV/__init__": { - "target": "sklearn/sklearn.model_selection._search/RandomizedSearchCV/__init__" - }, "sklearn/sklearn.model_selection._search/RandomizedSearchCV/_run_search": { "target": "sklearn/sklearn.model_selection._search/RandomizedSearchCV/_run_search" }, @@ -8244,18 +10451,12 @@ "sklearn/sklearn.model_selection._search_successive_halving/BaseSuccessiveHalving/_select_best_index": { "target": "sklearn/sklearn.model_selection._search_successive_halving/BaseSuccessiveHalving/_select_best_index" }, - "sklearn/sklearn.model_selection._search_successive_halving/BaseSuccessiveHalving/fit": { - "target": "sklearn/sklearn.model_selection._search_successive_halving/BaseSuccessiveHalving/fit" - }, "sklearn/sklearn.model_selection._search_successive_halving/HalvingGridSearchCV/__init__": { "target": 
"sklearn/sklearn.model_selection._search_successive_halving/HalvingGridSearchCV/__init__" }, "sklearn/sklearn.model_selection._search_successive_halving/HalvingGridSearchCV/_generate_candidate_params": { "target": "sklearn/sklearn.model_selection._search_successive_halving/HalvingGridSearchCV/_generate_candidate_params" }, - "sklearn/sklearn.model_selection._search_successive_halving/HalvingRandomSearchCV/__init__": { - "target": "sklearn/sklearn.model_selection._search_successive_halving/HalvingRandomSearchCV/__init__" - }, "sklearn/sklearn.model_selection._search_successive_halving/HalvingRandomSearchCV/_generate_candidate_params": { "target": "sklearn/sklearn.model_selection._search_successive_halving/HalvingRandomSearchCV/_generate_candidate_params" }, @@ -8280,9 +10481,6 @@ "sklearn/sklearn.model_selection._split/BaseCrossValidator/get_n_splits": { "target": "sklearn/sklearn.model_selection._split/BaseCrossValidator/get_n_splits" }, - "sklearn/sklearn.model_selection._split/BaseCrossValidator/split": { - "target": "sklearn/sklearn.model_selection._split/BaseCrossValidator/split" - }, "sklearn/sklearn.model_selection._split/BaseShuffleSplit/__init__": { "target": "sklearn/sklearn.model_selection._split/BaseShuffleSplit/__init__" }, @@ -8292,51 +10490,21 @@ "sklearn/sklearn.model_selection._split/BaseShuffleSplit/_iter_indices": { "target": "sklearn/sklearn.model_selection._split/BaseShuffleSplit/_iter_indices" }, - "sklearn/sklearn.model_selection._split/BaseShuffleSplit/get_n_splits": { - "target": "sklearn/sklearn.model_selection._split/BaseShuffleSplit/get_n_splits" - }, - "sklearn/sklearn.model_selection._split/BaseShuffleSplit/split": { - "target": "sklearn/sklearn.model_selection._split/BaseShuffleSplit/split" - }, - "sklearn/sklearn.model_selection._split/GroupKFold/__init__": { - "target": "sklearn/sklearn.model_selection._split/GroupKFold/__init__" - }, "sklearn/sklearn.model_selection._split/GroupKFold/_iter_test_indices": { "target": "sklearn/sklearn.model_selection._split/GroupKFold/_iter_test_indices" }, - "sklearn/sklearn.model_selection._split/GroupKFold/split": { - "target": "sklearn/sklearn.model_selection._split/GroupKFold/split" - }, - "sklearn/sklearn.model_selection._split/GroupShuffleSplit/__init__": { - "target": "sklearn/sklearn.model_selection._split/GroupShuffleSplit/__init__" - }, "sklearn/sklearn.model_selection._split/GroupShuffleSplit/_iter_indices": { "target": "sklearn/sklearn.model_selection._split/GroupShuffleSplit/_iter_indices" }, - "sklearn/sklearn.model_selection._split/GroupShuffleSplit/split": { - "target": "sklearn/sklearn.model_selection._split/GroupShuffleSplit/split" - }, - "sklearn/sklearn.model_selection._split/KFold/__init__": { - "target": "sklearn/sklearn.model_selection._split/KFold/__init__" - }, "sklearn/sklearn.model_selection._split/KFold/_iter_test_indices": { "target": "sklearn/sklearn.model_selection._split/KFold/_iter_test_indices" }, "sklearn/sklearn.model_selection._split/LeaveOneGroupOut/_iter_test_masks": { "target": "sklearn/sklearn.model_selection._split/LeaveOneGroupOut/_iter_test_masks" }, - "sklearn/sklearn.model_selection._split/LeaveOneGroupOut/get_n_splits": { - "target": "sklearn/sklearn.model_selection._split/LeaveOneGroupOut/get_n_splits" - }, - "sklearn/sklearn.model_selection._split/LeaveOneGroupOut/split": { - "target": "sklearn/sklearn.model_selection._split/LeaveOneGroupOut/split" - }, "sklearn/sklearn.model_selection._split/LeaveOneOut/_iter_test_indices": { "target": 
"sklearn/sklearn.model_selection._split/LeaveOneOut/_iter_test_indices" }, - "sklearn/sklearn.model_selection._split/LeaveOneOut/get_n_splits": { - "target": "sklearn/sklearn.model_selection._split/LeaveOneOut/get_n_splits" - }, "sklearn/sklearn.model_selection._split/LeavePGroupsOut/__init__": { "target": "sklearn/sklearn.model_selection._split/LeavePGroupsOut/__init__" }, @@ -8358,27 +10526,9 @@ "sklearn/sklearn.model_selection._split/LeavePOut/get_n_splits": { "target": "sklearn/sklearn.model_selection._split/LeavePOut/get_n_splits" }, - "sklearn/sklearn.model_selection._split/PredefinedSplit/__init__": { - "target": "sklearn/sklearn.model_selection._split/PredefinedSplit/__init__" - }, "sklearn/sklearn.model_selection._split/PredefinedSplit/_iter_test_masks": { "target": "sklearn/sklearn.model_selection._split/PredefinedSplit/_iter_test_masks" }, - "sklearn/sklearn.model_selection._split/PredefinedSplit/get_n_splits": { - "target": "sklearn/sklearn.model_selection._split/PredefinedSplit/get_n_splits" - }, - "sklearn/sklearn.model_selection._split/PredefinedSplit/split": { - "target": "sklearn/sklearn.model_selection._split/PredefinedSplit/split" - }, - "sklearn/sklearn.model_selection._split/RepeatedKFold/__init__": { - "target": "sklearn/sklearn.model_selection._split/RepeatedKFold/__init__" - }, - "sklearn/sklearn.model_selection._split/RepeatedStratifiedKFold/__init__": { - "target": "sklearn/sklearn.model_selection._split/RepeatedStratifiedKFold/__init__" - }, - "sklearn/sklearn.model_selection._split/ShuffleSplit/__init__": { - "target": "sklearn/sklearn.model_selection._split/ShuffleSplit/__init__" - }, "sklearn/sklearn.model_selection._split/ShuffleSplit/_iter_indices": { "target": "sklearn/sklearn.model_selection._split/ShuffleSplit/_iter_indices" }, @@ -8391,42 +10541,18 @@ "sklearn/sklearn.model_selection._split/StratifiedGroupKFold/_iter_test_indices": { "target": "sklearn/sklearn.model_selection._split/StratifiedGroupKFold/_iter_test_indices" }, - "sklearn/sklearn.model_selection._split/StratifiedKFold/__init__": { - "target": "sklearn/sklearn.model_selection._split/StratifiedKFold/__init__" - }, "sklearn/sklearn.model_selection._split/StratifiedKFold/_iter_test_masks": { "target": "sklearn/sklearn.model_selection._split/StratifiedKFold/_iter_test_masks" }, "sklearn/sklearn.model_selection._split/StratifiedKFold/_make_test_folds": { "target": "sklearn/sklearn.model_selection._split/StratifiedKFold/_make_test_folds" }, - "sklearn/sklearn.model_selection._split/StratifiedKFold/split": { - "target": "sklearn/sklearn.model_selection._split/StratifiedKFold/split" - }, - "sklearn/sklearn.model_selection._split/StratifiedShuffleSplit/__init__": { - "target": "sklearn/sklearn.model_selection._split/StratifiedShuffleSplit/__init__" - }, "sklearn/sklearn.model_selection._split/StratifiedShuffleSplit/_iter_indices": { "target": "sklearn/sklearn.model_selection._split/StratifiedShuffleSplit/_iter_indices" }, - "sklearn/sklearn.model_selection._split/StratifiedShuffleSplit/split": { - "target": "sklearn/sklearn.model_selection._split/StratifiedShuffleSplit/split" - }, - "sklearn/sklearn.model_selection._split/TimeSeriesSplit/__init__": { - "target": "sklearn/sklearn.model_selection._split/TimeSeriesSplit/__init__" - }, - "sklearn/sklearn.model_selection._split/TimeSeriesSplit/split": { - "target": "sklearn/sklearn.model_selection._split/TimeSeriesSplit/split" - }, "sklearn/sklearn.model_selection._split/_BaseKFold/__init__": { "target": 
"sklearn/sklearn.model_selection._split/_BaseKFold/__init__" }, - "sklearn/sklearn.model_selection._split/_BaseKFold/get_n_splits": { - "target": "sklearn/sklearn.model_selection._split/_BaseKFold/get_n_splits" - }, - "sklearn/sklearn.model_selection._split/_BaseKFold/split": { - "target": "sklearn/sklearn.model_selection._split/_BaseKFold/split" - }, "sklearn/sklearn.model_selection._split/_CVIterableWrapper/__init__": { "target": "sklearn/sklearn.model_selection._split/_CVIterableWrapper/__init__" }, @@ -8442,18 +10568,9 @@ "sklearn/sklearn.model_selection._split/_RepeatedSplits/__repr__": { "target": "sklearn/sklearn.model_selection._split/_RepeatedSplits/__repr__" }, - "sklearn/sklearn.model_selection._split/_RepeatedSplits/get_n_splits": { - "target": "sklearn/sklearn.model_selection._split/_RepeatedSplits/get_n_splits" - }, - "sklearn/sklearn.model_selection._split/_RepeatedSplits/split": { - "target": "sklearn/sklearn.model_selection._split/_RepeatedSplits/split" - }, "sklearn/sklearn.model_selection._split/_build_repr": { "target": "sklearn/sklearn.model_selection._split/_build_repr" }, - "sklearn/sklearn.model_selection._split/_validate_shuffle_split": { - "target": "sklearn/sklearn.model_selection._split/_validate_shuffle_split" - }, "sklearn/sklearn.model_selection._split/_yields_constant_splits": { "target": "sklearn/sklearn.model_selection._split/_yields_constant_splits" }, @@ -8484,9 +10601,6 @@ "sklearn/sklearn.model_selection._validation/_permutation_test_score": { "target": "sklearn/sklearn.model_selection._validation/_permutation_test_score" }, - "sklearn/sklearn.model_selection._validation/_score": { - "target": "sklearn/sklearn.model_selection._validation/_score" - }, "sklearn/sklearn.model_selection._validation/_shuffle": { "target": "sklearn/sklearn.model_selection._validation/_shuffle" }, @@ -8592,30 +10706,18 @@ "sklearn/sklearn.multioutput/RegressorChain/_more_tags": { "target": "sklearn/sklearn.multioutput/RegressorChain/_more_tags" }, - "sklearn/sklearn.multioutput/_BaseChain/__init__": { - "target": "sklearn/sklearn.multioutput/_BaseChain/__init__" - }, "sklearn/sklearn.multioutput/_BaseChain/fit": { "target": "sklearn/sklearn.multioutput/_BaseChain/fit" }, - "sklearn/sklearn.multioutput/_BaseChain/predict": { - "target": "sklearn/sklearn.multioutput/_BaseChain/predict" - }, "sklearn/sklearn.multioutput/_MultiOutputEstimator/__init__": { "target": "sklearn/sklearn.multioutput/_MultiOutputEstimator/__init__" }, "sklearn/sklearn.multioutput/_MultiOutputEstimator/_more_tags": { "target": "sklearn/sklearn.multioutput/_MultiOutputEstimator/_more_tags" }, - "sklearn/sklearn.multioutput/_MultiOutputEstimator/fit": { - "target": "sklearn/sklearn.multioutput/_MultiOutputEstimator/fit" - }, "sklearn/sklearn.multioutput/_MultiOutputEstimator/partial_fit": { "target": "sklearn/sklearn.multioutput/_MultiOutputEstimator/partial_fit" }, - "sklearn/sklearn.multioutput/_MultiOutputEstimator/predict": { - "target": "sklearn/sklearn.multioutput/_MultiOutputEstimator/predict" - }, "sklearn/sklearn.multioutput/_available_if_base_estimator_has": { "target": "sklearn/sklearn.multioutput/_available_if_base_estimator_has" }, @@ -8733,36 +10835,18 @@ "sklearn/sklearn.naive_bayes/_BaseDiscreteNB/_update_feature_log_prob": { "target": "sklearn/sklearn.naive_bayes/_BaseDiscreteNB/_update_feature_log_prob" }, - "sklearn/sklearn.naive_bayes/_BaseDiscreteNB/fit": { - "target": "sklearn/sklearn.naive_bayes/_BaseDiscreteNB/fit" - }, 
"sklearn/sklearn.naive_bayes/_BaseDiscreteNB/n_features_@getter": { "target": "sklearn/sklearn.naive_bayes/_BaseDiscreteNB/n_features_@getter" }, - "sklearn/sklearn.naive_bayes/_BaseDiscreteNB/partial_fit": { - "target": "sklearn/sklearn.naive_bayes/_BaseDiscreteNB/partial_fit" - }, "sklearn/sklearn.naive_bayes/_BaseNB/_check_X": { "target": "sklearn/sklearn.naive_bayes/_BaseNB/_check_X" }, "sklearn/sklearn.naive_bayes/_BaseNB/_joint_log_likelihood": { "target": "sklearn/sklearn.naive_bayes/_BaseNB/_joint_log_likelihood" }, - "sklearn/sklearn.naive_bayes/_BaseNB/predict": { - "target": "sklearn/sklearn.naive_bayes/_BaseNB/predict" - }, - "sklearn/sklearn.naive_bayes/_BaseNB/predict_log_proba": { - "target": "sklearn/sklearn.naive_bayes/_BaseNB/predict_log_proba" - }, - "sklearn/sklearn.naive_bayes/_BaseNB/predict_proba": { - "target": "sklearn/sklearn.naive_bayes/_BaseNB/predict_proba" - }, "sklearn/sklearn.neighbors._base/KNeighborsMixin/_kneighbors_reduce_func": { "target": "sklearn/sklearn.neighbors._base/KNeighborsMixin/_kneighbors_reduce_func" }, - "sklearn/sklearn.neighbors._base/KNeighborsMixin/kneighbors": { - "target": "sklearn/sklearn.neighbors._base/KNeighborsMixin/kneighbors" - }, "sklearn/sklearn.neighbors._base/KNeighborsMixin/kneighbors_graph": { "target": "sklearn/sklearn.neighbors._base/KNeighborsMixin/kneighbors_graph" }, @@ -8781,12 +10865,6 @@ "sklearn/sklearn.neighbors._base/RadiusNeighborsMixin/_radius_neighbors_reduce_func": { "target": "sklearn/sklearn.neighbors._base/RadiusNeighborsMixin/_radius_neighbors_reduce_func" }, - "sklearn/sklearn.neighbors._base/RadiusNeighborsMixin/radius_neighbors": { - "target": "sklearn/sklearn.neighbors._base/RadiusNeighborsMixin/radius_neighbors" - }, - "sklearn/sklearn.neighbors._base/RadiusNeighborsMixin/radius_neighbors_graph": { - "target": "sklearn/sklearn.neighbors._base/RadiusNeighborsMixin/radius_neighbors_graph" - }, "sklearn/sklearn.neighbors._base/_check_precomputed": { "target": "sklearn/sklearn.neighbors._base/_check_precomputed" }, @@ -8811,45 +10889,18 @@ "sklearn/sklearn.neighbors._base/_tree_query_radius_parallel_helper": { "target": "sklearn/sklearn.neighbors._base/_tree_query_radius_parallel_helper" }, - "sklearn/sklearn.neighbors._base/sort_graph_by_row_values": { - "target": "sklearn/sklearn.neighbors._base/sort_graph_by_row_values" - }, - "sklearn/sklearn.neighbors._classification/KNeighborsClassifier/__init__": { - "target": "sklearn/sklearn.neighbors._classification/KNeighborsClassifier/__init__" - }, "sklearn/sklearn.neighbors._classification/KNeighborsClassifier/_more_tags": { "target": "sklearn/sklearn.neighbors._classification/KNeighborsClassifier/_more_tags" }, - "sklearn/sklearn.neighbors._classification/KNeighborsClassifier/fit": { - "target": "sklearn/sklearn.neighbors._classification/KNeighborsClassifier/fit" - }, - "sklearn/sklearn.neighbors._classification/KNeighborsClassifier/predict": { - "target": "sklearn/sklearn.neighbors._classification/KNeighborsClassifier/predict" - }, - "sklearn/sklearn.neighbors._classification/KNeighborsClassifier/predict_proba": { - "target": "sklearn/sklearn.neighbors._classification/KNeighborsClassifier/predict_proba" - }, - "sklearn/sklearn.neighbors._classification/RadiusNeighborsClassifier/__init__": { - "target": "sklearn/sklearn.neighbors._classification/RadiusNeighborsClassifier/__init__" - }, "sklearn/sklearn.neighbors._classification/RadiusNeighborsClassifier/_more_tags": { "target": 
"sklearn/sklearn.neighbors._classification/RadiusNeighborsClassifier/_more_tags" }, - "sklearn/sklearn.neighbors._classification/RadiusNeighborsClassifier/fit": { - "target": "sklearn/sklearn.neighbors._classification/RadiusNeighborsClassifier/fit" - }, - "sklearn/sklearn.neighbors._classification/RadiusNeighborsClassifier/predict": { - "target": "sklearn/sklearn.neighbors._classification/RadiusNeighborsClassifier/predict" - }, "sklearn/sklearn.neighbors._classification/RadiusNeighborsClassifier/predict_proba": { "target": "sklearn/sklearn.neighbors._classification/RadiusNeighborsClassifier/predict_proba" }, "sklearn/sklearn.neighbors._distance_metric/DistanceMetric/_warn": { "target": "sklearn/sklearn.neighbors._distance_metric/DistanceMetric/_warn" }, - "sklearn/sklearn.neighbors._distance_metric/DistanceMetric/get_metric": { - "target": "sklearn/sklearn.neighbors._distance_metric/DistanceMetric/get_metric" - }, "sklearn/sklearn.neighbors._graph/KNeighborsTransformer/__init__": { "target": "sklearn/sklearn.neighbors._graph/KNeighborsTransformer/__init__" }, @@ -8889,30 +10940,15 @@ "sklearn/sklearn.neighbors._graph/radius_neighbors_graph": { "target": "sklearn/sklearn.neighbors._graph/radius_neighbors_graph" }, - "sklearn/sklearn.neighbors._kde/KernelDensity/__init__": { - "target": "sklearn/sklearn.neighbors._kde/KernelDensity/__init__" - }, "sklearn/sklearn.neighbors._kde/KernelDensity/_choose_algorithm": { "target": "sklearn/sklearn.neighbors._kde/KernelDensity/_choose_algorithm" }, "sklearn/sklearn.neighbors._kde/KernelDensity/_more_tags": { "target": "sklearn/sklearn.neighbors._kde/KernelDensity/_more_tags" }, - "sklearn/sklearn.neighbors._kde/KernelDensity/fit": { - "target": "sklearn/sklearn.neighbors._kde/KernelDensity/fit" - }, - "sklearn/sklearn.neighbors._kde/KernelDensity/sample": { - "target": "sklearn/sklearn.neighbors._kde/KernelDensity/sample" - }, "sklearn/sklearn.neighbors._kde/KernelDensity/score": { "target": "sklearn/sklearn.neighbors._kde/KernelDensity/score" }, - "sklearn/sklearn.neighbors._kde/KernelDensity/score_samples": { - "target": "sklearn/sklearn.neighbors._kde/KernelDensity/score_samples" - }, - "sklearn/sklearn.neighbors._lof/LocalOutlierFactor/__init__": { - "target": "sklearn/sklearn.neighbors._lof/LocalOutlierFactor/__init__" - }, "sklearn/sklearn.neighbors._lof/LocalOutlierFactor/_check_novelty_decision_function": { "target": "sklearn/sklearn.neighbors._lof/LocalOutlierFactor/_check_novelty_decision_function" }, @@ -8934,21 +10970,12 @@ "sklearn/sklearn.neighbors._lof/LocalOutlierFactor/decision_function": { "target": "sklearn/sklearn.neighbors._lof/LocalOutlierFactor/decision_function" }, - "sklearn/sklearn.neighbors._lof/LocalOutlierFactor/fit": { - "target": "sklearn/sklearn.neighbors._lof/LocalOutlierFactor/fit" - }, - "sklearn/sklearn.neighbors._lof/LocalOutlierFactor/fit_predict": { - "target": "sklearn/sklearn.neighbors._lof/LocalOutlierFactor/fit_predict" - }, "sklearn/sklearn.neighbors._lof/LocalOutlierFactor/predict": { "target": "sklearn/sklearn.neighbors._lof/LocalOutlierFactor/predict" }, "sklearn/sklearn.neighbors._lof/LocalOutlierFactor/score_samples": { "target": "sklearn/sklearn.neighbors._lof/LocalOutlierFactor/score_samples" }, - "sklearn/sklearn.neighbors._nca/NeighborhoodComponentsAnalysis/__init__": { - "target": "sklearn/sklearn.neighbors._nca/NeighborhoodComponentsAnalysis/__init__" - }, "sklearn/sklearn.neighbors._nca/NeighborhoodComponentsAnalysis/_callback": { "target": 
"sklearn/sklearn.neighbors._nca/NeighborhoodComponentsAnalysis/_callback" }, @@ -8970,42 +10997,9 @@ "sklearn/sklearn.neighbors._nca/NeighborhoodComponentsAnalysis/transform": { "target": "sklearn/sklearn.neighbors._nca/NeighborhoodComponentsAnalysis/transform" }, - "sklearn/sklearn.neighbors._nearest_centroid/NearestCentroid/__init__": { - "target": "sklearn/sklearn.neighbors._nearest_centroid/NearestCentroid/__init__" - }, - "sklearn/sklearn.neighbors._nearest_centroid/NearestCentroid/fit": { - "target": "sklearn/sklearn.neighbors._nearest_centroid/NearestCentroid/fit" - }, - "sklearn/sklearn.neighbors._nearest_centroid/NearestCentroid/predict": { - "target": "sklearn/sklearn.neighbors._nearest_centroid/NearestCentroid/predict" - }, - "sklearn/sklearn.neighbors._regression/KNeighborsRegressor/__init__": { - "target": "sklearn/sklearn.neighbors._regression/KNeighborsRegressor/__init__" - }, "sklearn/sklearn.neighbors._regression/KNeighborsRegressor/_more_tags": { "target": "sklearn/sklearn.neighbors._regression/KNeighborsRegressor/_more_tags" }, - "sklearn/sklearn.neighbors._regression/KNeighborsRegressor/fit": { - "target": "sklearn/sklearn.neighbors._regression/KNeighborsRegressor/fit" - }, - "sklearn/sklearn.neighbors._regression/KNeighborsRegressor/predict": { - "target": "sklearn/sklearn.neighbors._regression/KNeighborsRegressor/predict" - }, - "sklearn/sklearn.neighbors._regression/RadiusNeighborsRegressor/__init__": { - "target": "sklearn/sklearn.neighbors._regression/RadiusNeighborsRegressor/__init__" - }, - "sklearn/sklearn.neighbors._regression/RadiusNeighborsRegressor/fit": { - "target": "sklearn/sklearn.neighbors._regression/RadiusNeighborsRegressor/fit" - }, - "sklearn/sklearn.neighbors._regression/RadiusNeighborsRegressor/predict": { - "target": "sklearn/sklearn.neighbors._regression/RadiusNeighborsRegressor/predict" - }, - "sklearn/sklearn.neighbors._unsupervised/NearestNeighbors/__init__": { - "target": "sklearn/sklearn.neighbors._unsupervised/NearestNeighbors/__init__" - }, - "sklearn/sklearn.neighbors._unsupervised/NearestNeighbors/fit": { - "target": "sklearn/sklearn.neighbors._unsupervised/NearestNeighbors/fit" - }, "sklearn/sklearn.neighbors.setup/configuration": { "target": "sklearn/sklearn.neighbors.setup/configuration" }, @@ -9090,15 +11084,6 @@ "sklearn/sklearn.neural_network._multilayer_perceptron/BaseMultilayerPerceptron/_validate_hyperparameters": { "target": "sklearn/sklearn.neural_network._multilayer_perceptron/BaseMultilayerPerceptron/_validate_hyperparameters" }, - "sklearn/sklearn.neural_network._multilayer_perceptron/BaseMultilayerPerceptron/fit": { - "target": "sklearn/sklearn.neural_network._multilayer_perceptron/BaseMultilayerPerceptron/fit" - }, - "sklearn/sklearn.neural_network._multilayer_perceptron/BaseMultilayerPerceptron/partial_fit": { - "target": "sklearn/sklearn.neural_network._multilayer_perceptron/BaseMultilayerPerceptron/partial_fit" - }, - "sklearn/sklearn.neural_network._multilayer_perceptron/MLPClassifier/__init__": { - "target": "sklearn/sklearn.neural_network._multilayer_perceptron/MLPClassifier/__init__" - }, "sklearn/sklearn.neural_network._multilayer_perceptron/MLPClassifier/_more_tags": { "target": "sklearn/sklearn.neural_network._multilayer_perceptron/MLPClassifier/_more_tags" }, @@ -9108,30 +11093,15 @@ "sklearn/sklearn.neural_network._multilayer_perceptron/MLPClassifier/partial_fit": { "target": "sklearn/sklearn.neural_network._multilayer_perceptron/MLPClassifier/partial_fit" }, - 
"sklearn/sklearn.neural_network._multilayer_perceptron/MLPClassifier/predict": { - "target": "sklearn/sklearn.neural_network._multilayer_perceptron/MLPClassifier/predict" - }, "sklearn/sklearn.neural_network._multilayer_perceptron/MLPClassifier/predict_log_proba": { "target": "sklearn/sklearn.neural_network._multilayer_perceptron/MLPClassifier/predict_log_proba" }, - "sklearn/sklearn.neural_network._multilayer_perceptron/MLPClassifier/predict_proba": { - "target": "sklearn/sklearn.neural_network._multilayer_perceptron/MLPClassifier/predict_proba" - }, - "sklearn/sklearn.neural_network._multilayer_perceptron/MLPRegressor/__init__": { - "target": "sklearn/sklearn.neural_network._multilayer_perceptron/MLPRegressor/__init__" - }, "sklearn/sklearn.neural_network._multilayer_perceptron/MLPRegressor/_validate_input": { "target": "sklearn/sklearn.neural_network._multilayer_perceptron/MLPRegressor/_validate_input" }, - "sklearn/sklearn.neural_network._multilayer_perceptron/MLPRegressor/predict": { - "target": "sklearn/sklearn.neural_network._multilayer_perceptron/MLPRegressor/predict" - }, "sklearn/sklearn.neural_network._multilayer_perceptron/_pack": { "target": "sklearn/sklearn.neural_network._multilayer_perceptron/_pack" }, - "sklearn/sklearn.neural_network._rbm/BernoulliRBM/__init__": { - "target": "sklearn/sklearn.neural_network._rbm/BernoulliRBM/__init__" - }, "sklearn/sklearn.neural_network._rbm/BernoulliRBM/_fit": { "target": "sklearn/sklearn.neural_network._rbm/BernoulliRBM/_fit" }, @@ -9150,18 +11120,12 @@ "sklearn/sklearn.neural_network._rbm/BernoulliRBM/_sample_visibles": { "target": "sklearn/sklearn.neural_network._rbm/BernoulliRBM/_sample_visibles" }, - "sklearn/sklearn.neural_network._rbm/BernoulliRBM/fit": { - "target": "sklearn/sklearn.neural_network._rbm/BernoulliRBM/fit" - }, "sklearn/sklearn.neural_network._rbm/BernoulliRBM/gibbs": { "target": "sklearn/sklearn.neural_network._rbm/BernoulliRBM/gibbs" }, "sklearn/sklearn.neural_network._rbm/BernoulliRBM/partial_fit": { "target": "sklearn/sklearn.neural_network._rbm/BernoulliRBM/partial_fit" }, - "sklearn/sklearn.neural_network._rbm/BernoulliRBM/score_samples": { - "target": "sklearn/sklearn.neural_network._rbm/BernoulliRBM/score_samples" - }, "sklearn/sklearn.neural_network._rbm/BernoulliRBM/transform": { "target": "sklearn/sklearn.neural_network._rbm/BernoulliRBM/transform" }, @@ -9198,30 +11162,15 @@ "sklearn/sklearn.pipeline/FeatureUnion/__sklearn_is_fitted__": { "target": "sklearn/sklearn.pipeline/FeatureUnion/__sklearn_is_fitted__" }, - "sklearn/sklearn.pipeline/FeatureUnion/_hstack": { - "target": "sklearn/sklearn.pipeline/FeatureUnion/_hstack" - }, - "sklearn/sklearn.pipeline/FeatureUnion/_iter": { - "target": "sklearn/sklearn.pipeline/FeatureUnion/_iter" - }, "sklearn/sklearn.pipeline/FeatureUnion/_log_message": { "target": "sklearn/sklearn.pipeline/FeatureUnion/_log_message" }, - "sklearn/sklearn.pipeline/FeatureUnion/_parallel_func": { - "target": "sklearn/sklearn.pipeline/FeatureUnion/_parallel_func" - }, "sklearn/sklearn.pipeline/FeatureUnion/_sk_visual_block_": { "target": "sklearn/sklearn.pipeline/FeatureUnion/_sk_visual_block_" }, - "sklearn/sklearn.pipeline/FeatureUnion/_update_transformer_list": { - "target": "sklearn/sklearn.pipeline/FeatureUnion/_update_transformer_list" - }, "sklearn/sklearn.pipeline/FeatureUnion/_validate_transformer_weights": { "target": "sklearn/sklearn.pipeline/FeatureUnion/_validate_transformer_weights" }, - "sklearn/sklearn.pipeline/FeatureUnion/_validate_transformers": { - "target": 
"sklearn/sklearn.pipeline/FeatureUnion/_validate_transformers" - }, "sklearn/sklearn.pipeline/FeatureUnion/get_feature_names_out": { "target": "sklearn/sklearn.pipeline/FeatureUnion/get_feature_names_out" }, @@ -9261,9 +11210,6 @@ "sklearn/sklearn.pipeline/Pipeline/_fit": { "target": "sklearn/sklearn.pipeline/Pipeline/_fit" }, - "sklearn/sklearn.pipeline/Pipeline/_iter": { - "target": "sklearn/sklearn.pipeline/Pipeline/_iter" - }, "sklearn/sklearn.pipeline/Pipeline/_log_message": { "target": "sklearn/sklearn.pipeline/Pipeline/_log_message" }, @@ -9309,27 +11255,12 @@ "sklearn/sklearn.pipeline/_fit_transform_one": { "target": "sklearn/sklearn.pipeline/_fit_transform_one" }, - "sklearn/sklearn.pipeline/_name_estimators": { - "target": "sklearn/sklearn.pipeline/_name_estimators" - }, "sklearn/sklearn.pipeline/_transform_one": { "target": "sklearn/sklearn.pipeline/_transform_one" }, - "sklearn/sklearn.preprocessing._data/Binarizer/__init__": { - "target": "sklearn/sklearn.preprocessing._data/Binarizer/__init__" - }, "sklearn/sklearn.preprocessing._data/Binarizer/_more_tags": { "target": "sklearn/sklearn.preprocessing._data/Binarizer/_more_tags" }, - "sklearn/sklearn.preprocessing._data/Binarizer/fit": { - "target": "sklearn/sklearn.preprocessing._data/Binarizer/fit" - }, - "sklearn/sklearn.preprocessing._data/Binarizer/transform": { - "target": "sklearn/sklearn.preprocessing._data/Binarizer/transform" - }, - "sklearn/sklearn.preprocessing._data/KernelCenterer/__init__": { - "target": "sklearn/sklearn.preprocessing._data/KernelCenterer/__init__" - }, "sklearn/sklearn.preprocessing._data/KernelCenterer/_more_tags": { "target": "sklearn/sklearn.preprocessing._data/KernelCenterer/_more_tags" }, @@ -9339,66 +11270,24 @@ "sklearn/sklearn.preprocessing._data/KernelCenterer/fit": { "target": "sklearn/sklearn.preprocessing._data/KernelCenterer/fit" }, - "sklearn/sklearn.preprocessing._data/KernelCenterer/transform": { - "target": "sklearn/sklearn.preprocessing._data/KernelCenterer/transform" - }, - "sklearn/sklearn.preprocessing._data/MaxAbsScaler/__init__": { - "target": "sklearn/sklearn.preprocessing._data/MaxAbsScaler/__init__" - }, "sklearn/sklearn.preprocessing._data/MaxAbsScaler/_more_tags": { "target": "sklearn/sklearn.preprocessing._data/MaxAbsScaler/_more_tags" }, "sklearn/sklearn.preprocessing._data/MaxAbsScaler/_reset": { "target": "sklearn/sklearn.preprocessing._data/MaxAbsScaler/_reset" }, - "sklearn/sklearn.preprocessing._data/MaxAbsScaler/fit": { - "target": "sklearn/sklearn.preprocessing._data/MaxAbsScaler/fit" - }, - "sklearn/sklearn.preprocessing._data/MaxAbsScaler/inverse_transform": { - "target": "sklearn/sklearn.preprocessing._data/MaxAbsScaler/inverse_transform" - }, "sklearn/sklearn.preprocessing._data/MaxAbsScaler/partial_fit": { "target": "sklearn/sklearn.preprocessing._data/MaxAbsScaler/partial_fit" }, - "sklearn/sklearn.preprocessing._data/MaxAbsScaler/transform": { - "target": "sklearn/sklearn.preprocessing._data/MaxAbsScaler/transform" - }, - "sklearn/sklearn.preprocessing._data/MinMaxScaler/__init__": { - "target": "sklearn/sklearn.preprocessing._data/MinMaxScaler/__init__" - }, "sklearn/sklearn.preprocessing._data/MinMaxScaler/_more_tags": { "target": "sklearn/sklearn.preprocessing._data/MinMaxScaler/_more_tags" }, "sklearn/sklearn.preprocessing._data/MinMaxScaler/_reset": { "target": "sklearn/sklearn.preprocessing._data/MinMaxScaler/_reset" }, - "sklearn/sklearn.preprocessing._data/MinMaxScaler/fit": { - "target": "sklearn/sklearn.preprocessing._data/MinMaxScaler/fit" - 
}, - "sklearn/sklearn.preprocessing._data/MinMaxScaler/inverse_transform": { - "target": "sklearn/sklearn.preprocessing._data/MinMaxScaler/inverse_transform" - }, - "sklearn/sklearn.preprocessing._data/MinMaxScaler/partial_fit": { - "target": "sklearn/sklearn.preprocessing._data/MinMaxScaler/partial_fit" - }, - "sklearn/sklearn.preprocessing._data/MinMaxScaler/transform": { - "target": "sklearn/sklearn.preprocessing._data/MinMaxScaler/transform" - }, - "sklearn/sklearn.preprocessing._data/Normalizer/__init__": { - "target": "sklearn/sklearn.preprocessing._data/Normalizer/__init__" - }, "sklearn/sklearn.preprocessing._data/Normalizer/_more_tags": { "target": "sklearn/sklearn.preprocessing._data/Normalizer/_more_tags" }, - "sklearn/sklearn.preprocessing._data/Normalizer/fit": { - "target": "sklearn/sklearn.preprocessing._data/Normalizer/fit" - }, - "sklearn/sklearn.preprocessing._data/Normalizer/transform": { - "target": "sklearn/sklearn.preprocessing._data/Normalizer/transform" - }, - "sklearn/sklearn.preprocessing._data/PowerTransformer/__init__": { - "target": "sklearn/sklearn.preprocessing._data/PowerTransformer/__init__" - }, "sklearn/sklearn.preprocessing._data/PowerTransformer/_box_cox_inverse_tranform": { "target": "sklearn/sklearn.preprocessing._data/PowerTransformer/_box_cox_inverse_tranform" }, @@ -9423,21 +11312,6 @@ "sklearn/sklearn.preprocessing._data/PowerTransformer/_yeo_johnson_transform": { "target": "sklearn/sklearn.preprocessing._data/PowerTransformer/_yeo_johnson_transform" }, - "sklearn/sklearn.preprocessing._data/PowerTransformer/fit": { - "target": "sklearn/sklearn.preprocessing._data/PowerTransformer/fit" - }, - "sklearn/sklearn.preprocessing._data/PowerTransformer/fit_transform": { - "target": "sklearn/sklearn.preprocessing._data/PowerTransformer/fit_transform" - }, - "sklearn/sklearn.preprocessing._data/PowerTransformer/inverse_transform": { - "target": "sklearn/sklearn.preprocessing._data/PowerTransformer/inverse_transform" - }, - "sklearn/sklearn.preprocessing._data/PowerTransformer/transform": { - "target": "sklearn/sklearn.preprocessing._data/PowerTransformer/transform" - }, - "sklearn/sklearn.preprocessing._data/QuantileTransformer/__init__": { - "target": "sklearn/sklearn.preprocessing._data/QuantileTransformer/__init__" - }, "sklearn/sklearn.preprocessing._data/QuantileTransformer/_check_inputs": { "target": "sklearn/sklearn.preprocessing._data/QuantileTransformer/_check_inputs" }, @@ -9456,51 +11330,15 @@ "sklearn/sklearn.preprocessing._data/QuantileTransformer/_transform_col": { "target": "sklearn/sklearn.preprocessing._data/QuantileTransformer/_transform_col" }, - "sklearn/sklearn.preprocessing._data/QuantileTransformer/fit": { - "target": "sklearn/sklearn.preprocessing._data/QuantileTransformer/fit" - }, - "sklearn/sklearn.preprocessing._data/QuantileTransformer/inverse_transform": { - "target": "sklearn/sklearn.preprocessing._data/QuantileTransformer/inverse_transform" - }, - "sklearn/sklearn.preprocessing._data/QuantileTransformer/transform": { - "target": "sklearn/sklearn.preprocessing._data/QuantileTransformer/transform" - }, - "sklearn/sklearn.preprocessing._data/RobustScaler/__init__": { - "target": "sklearn/sklearn.preprocessing._data/RobustScaler/__init__" - }, "sklearn/sklearn.preprocessing._data/RobustScaler/_more_tags": { "target": "sklearn/sklearn.preprocessing._data/RobustScaler/_more_tags" }, - "sklearn/sklearn.preprocessing._data/RobustScaler/fit": { - "target": "sklearn/sklearn.preprocessing._data/RobustScaler/fit" - }, - 
"sklearn/sklearn.preprocessing._data/RobustScaler/inverse_transform": { - "target": "sklearn/sklearn.preprocessing._data/RobustScaler/inverse_transform" - }, - "sklearn/sklearn.preprocessing._data/RobustScaler/transform": { - "target": "sklearn/sklearn.preprocessing._data/RobustScaler/transform" - }, - "sklearn/sklearn.preprocessing._data/StandardScaler/__init__": { - "target": "sklearn/sklearn.preprocessing._data/StandardScaler/__init__" - }, "sklearn/sklearn.preprocessing._data/StandardScaler/_more_tags": { "target": "sklearn/sklearn.preprocessing._data/StandardScaler/_more_tags" }, "sklearn/sklearn.preprocessing._data/StandardScaler/_reset": { "target": "sklearn/sklearn.preprocessing._data/StandardScaler/_reset" }, - "sklearn/sklearn.preprocessing._data/StandardScaler/fit": { - "target": "sklearn/sklearn.preprocessing._data/StandardScaler/fit" - }, - "sklearn/sklearn.preprocessing._data/StandardScaler/inverse_transform": { - "target": "sklearn/sklearn.preprocessing._data/StandardScaler/inverse_transform" - }, - "sklearn/sklearn.preprocessing._data/StandardScaler/partial_fit": { - "target": "sklearn/sklearn.preprocessing._data/StandardScaler/partial_fit" - }, - "sklearn/sklearn.preprocessing._data/StandardScaler/transform": { - "target": "sklearn/sklearn.preprocessing._data/StandardScaler/transform" - }, "sklearn/sklearn.preprocessing._data/_handle_zeros_in_scale": { "target": "sklearn/sklearn.preprocessing._data/_handle_zeros_in_scale" }, @@ -9510,27 +11348,15 @@ "sklearn/sklearn.preprocessing._data/add_dummy_feature": { "target": "sklearn/sklearn.preprocessing._data/add_dummy_feature" }, - "sklearn/sklearn.preprocessing._discretization/KBinsDiscretizer/__init__": { - "target": "sklearn/sklearn.preprocessing._discretization/KBinsDiscretizer/__init__" - }, "sklearn/sklearn.preprocessing._discretization/KBinsDiscretizer/_validate_n_bins": { "target": "sklearn/sklearn.preprocessing._discretization/KBinsDiscretizer/_validate_n_bins" }, - "sklearn/sklearn.preprocessing._discretization/KBinsDiscretizer/fit": { - "target": "sklearn/sklearn.preprocessing._discretization/KBinsDiscretizer/fit" - }, "sklearn/sklearn.preprocessing._discretization/KBinsDiscretizer/get_feature_names_out": { "target": "sklearn/sklearn.preprocessing._discretization/KBinsDiscretizer/get_feature_names_out" }, "sklearn/sklearn.preprocessing._discretization/KBinsDiscretizer/inverse_transform": { "target": "sklearn/sklearn.preprocessing._discretization/KBinsDiscretizer/inverse_transform" }, - "sklearn/sklearn.preprocessing._discretization/KBinsDiscretizer/transform": { - "target": "sklearn/sklearn.preprocessing._discretization/KBinsDiscretizer/transform" - }, - "sklearn/sklearn.preprocessing._encoders/OneHotEncoder/__init__": { - "target": "sklearn/sklearn.preprocessing._encoders/OneHotEncoder/__init__" - }, "sklearn/sklearn.preprocessing._encoders/OneHotEncoder/_compute_drop_idx": { "target": "sklearn/sklearn.preprocessing._encoders/OneHotEncoder/_compute_drop_idx" }, @@ -9558,39 +11384,12 @@ "sklearn/sklearn.preprocessing._encoders/OneHotEncoder/_validate_keywords": { "target": "sklearn/sklearn.preprocessing._encoders/OneHotEncoder/_validate_keywords" }, - "sklearn/sklearn.preprocessing._encoders/OneHotEncoder/fit": { - "target": "sklearn/sklearn.preprocessing._encoders/OneHotEncoder/fit" - }, - "sklearn/sklearn.preprocessing._encoders/OneHotEncoder/fit_transform": { - "target": "sklearn/sklearn.preprocessing._encoders/OneHotEncoder/fit_transform" - }, - 
"sklearn/sklearn.preprocessing._encoders/OneHotEncoder/get_feature_names": { - "target": "sklearn/sklearn.preprocessing._encoders/OneHotEncoder/get_feature_names" - }, "sklearn/sklearn.preprocessing._encoders/OneHotEncoder/get_feature_names_out": { "target": "sklearn/sklearn.preprocessing._encoders/OneHotEncoder/get_feature_names_out" }, "sklearn/sklearn.preprocessing._encoders/OneHotEncoder/infrequent_categories_@getter": { "target": "sklearn/sklearn.preprocessing._encoders/OneHotEncoder/infrequent_categories_@getter" }, - "sklearn/sklearn.preprocessing._encoders/OneHotEncoder/inverse_transform": { - "target": "sklearn/sklearn.preprocessing._encoders/OneHotEncoder/inverse_transform" - }, - "sklearn/sklearn.preprocessing._encoders/OneHotEncoder/transform": { - "target": "sklearn/sklearn.preprocessing._encoders/OneHotEncoder/transform" - }, - "sklearn/sklearn.preprocessing._encoders/OrdinalEncoder/__init__": { - "target": "sklearn/sklearn.preprocessing._encoders/OrdinalEncoder/__init__" - }, - "sklearn/sklearn.preprocessing._encoders/OrdinalEncoder/fit": { - "target": "sklearn/sklearn.preprocessing._encoders/OrdinalEncoder/fit" - }, - "sklearn/sklearn.preprocessing._encoders/OrdinalEncoder/inverse_transform": { - "target": "sklearn/sklearn.preprocessing._encoders/OrdinalEncoder/inverse_transform" - }, - "sklearn/sklearn.preprocessing._encoders/OrdinalEncoder/transform": { - "target": "sklearn/sklearn.preprocessing._encoders/OrdinalEncoder/transform" - }, "sklearn/sklearn.preprocessing._encoders/_BaseEncoder/_check_X": { "target": "sklearn/sklearn.preprocessing._encoders/_BaseEncoder/_check_X" }, @@ -9606,9 +11405,6 @@ "sklearn/sklearn.preprocessing._encoders/_BaseEncoder/_transform": { "target": "sklearn/sklearn.preprocessing._encoders/_BaseEncoder/_transform" }, - "sklearn/sklearn.preprocessing._function_transformer/FunctionTransformer/__init__": { - "target": "sklearn/sklearn.preprocessing._function_transformer/FunctionTransformer/__init__" - }, "sklearn/sklearn.preprocessing._function_transformer/FunctionTransformer/__sklearn_is_fitted__": { "target": "sklearn/sklearn.preprocessing._function_transformer/FunctionTransformer/__sklearn_is_fitted__" }, @@ -9633,48 +11429,15 @@ "sklearn/sklearn.preprocessing._function_transformer/FunctionTransformer/inverse_transform": { "target": "sklearn/sklearn.preprocessing._function_transformer/FunctionTransformer/inverse_transform" }, - "sklearn/sklearn.preprocessing._function_transformer/FunctionTransformer/transform": { - "target": "sklearn/sklearn.preprocessing._function_transformer/FunctionTransformer/transform" - }, "sklearn/sklearn.preprocessing._function_transformer/_identity": { "target": "sklearn/sklearn.preprocessing._function_transformer/_identity" }, - "sklearn/sklearn.preprocessing._label/LabelBinarizer/__init__": { - "target": "sklearn/sklearn.preprocessing._label/LabelBinarizer/__init__" - }, "sklearn/sklearn.preprocessing._label/LabelBinarizer/_more_tags": { "target": "sklearn/sklearn.preprocessing._label/LabelBinarizer/_more_tags" }, - "sklearn/sklearn.preprocessing._label/LabelBinarizer/fit": { - "target": "sklearn/sklearn.preprocessing._label/LabelBinarizer/fit" - }, - "sklearn/sklearn.preprocessing._label/LabelBinarizer/fit_transform": { - "target": "sklearn/sklearn.preprocessing._label/LabelBinarizer/fit_transform" - }, - "sklearn/sklearn.preprocessing._label/LabelBinarizer/inverse_transform": { - "target": "sklearn/sklearn.preprocessing._label/LabelBinarizer/inverse_transform" - }, - 
"sklearn/sklearn.preprocessing._label/LabelBinarizer/transform": { - "target": "sklearn/sklearn.preprocessing._label/LabelBinarizer/transform" - }, "sklearn/sklearn.preprocessing._label/LabelEncoder/_more_tags": { "target": "sklearn/sklearn.preprocessing._label/LabelEncoder/_more_tags" }, - "sklearn/sklearn.preprocessing._label/LabelEncoder/fit": { - "target": "sklearn/sklearn.preprocessing._label/LabelEncoder/fit" - }, - "sklearn/sklearn.preprocessing._label/LabelEncoder/fit_transform": { - "target": "sklearn/sklearn.preprocessing._label/LabelEncoder/fit_transform" - }, - "sklearn/sklearn.preprocessing._label/LabelEncoder/inverse_transform": { - "target": "sklearn/sklearn.preprocessing._label/LabelEncoder/inverse_transform" - }, - "sklearn/sklearn.preprocessing._label/LabelEncoder/transform": { - "target": "sklearn/sklearn.preprocessing._label/LabelEncoder/transform" - }, - "sklearn/sklearn.preprocessing._label/MultiLabelBinarizer/__init__": { - "target": "sklearn/sklearn.preprocessing._label/MultiLabelBinarizer/__init__" - }, "sklearn/sklearn.preprocessing._label/MultiLabelBinarizer/_build_cache": { "target": "sklearn/sklearn.preprocessing._label/MultiLabelBinarizer/_build_cache" }, @@ -9684,39 +11447,18 @@ "sklearn/sklearn.preprocessing._label/MultiLabelBinarizer/_transform": { "target": "sklearn/sklearn.preprocessing._label/MultiLabelBinarizer/_transform" }, - "sklearn/sklearn.preprocessing._label/MultiLabelBinarizer/fit": { - "target": "sklearn/sklearn.preprocessing._label/MultiLabelBinarizer/fit" - }, - "sklearn/sklearn.preprocessing._label/MultiLabelBinarizer/fit_transform": { - "target": "sklearn/sklearn.preprocessing._label/MultiLabelBinarizer/fit_transform" - }, - "sklearn/sklearn.preprocessing._label/MultiLabelBinarizer/inverse_transform": { - "target": "sklearn/sklearn.preprocessing._label/MultiLabelBinarizer/inverse_transform" - }, - "sklearn/sklearn.preprocessing._label/MultiLabelBinarizer/transform": { - "target": "sklearn/sklearn.preprocessing._label/MultiLabelBinarizer/transform" - }, "sklearn/sklearn.preprocessing._label/_inverse_binarize_multiclass": { "target": "sklearn/sklearn.preprocessing._label/_inverse_binarize_multiclass" }, "sklearn/sklearn.preprocessing._label/_inverse_binarize_thresholding": { "target": "sklearn/sklearn.preprocessing._label/_inverse_binarize_thresholding" }, - "sklearn/sklearn.preprocessing._polynomial/PolynomialFeatures/__init__": { - "target": "sklearn/sklearn.preprocessing._polynomial/PolynomialFeatures/__init__" - }, "sklearn/sklearn.preprocessing._polynomial/PolynomialFeatures/_combinations": { "target": "sklearn/sklearn.preprocessing._polynomial/PolynomialFeatures/_combinations" }, "sklearn/sklearn.preprocessing._polynomial/PolynomialFeatures/_num_combinations": { "target": "sklearn/sklearn.preprocessing._polynomial/PolynomialFeatures/_num_combinations" }, - "sklearn/sklearn.preprocessing._polynomial/PolynomialFeatures/fit": { - "target": "sklearn/sklearn.preprocessing._polynomial/PolynomialFeatures/fit" - }, - "sklearn/sklearn.preprocessing._polynomial/PolynomialFeatures/get_feature_names": { - "target": "sklearn/sklearn.preprocessing._polynomial/PolynomialFeatures/get_feature_names" - }, "sklearn/sklearn.preprocessing._polynomial/PolynomialFeatures/get_feature_names_out": { "target": "sklearn/sklearn.preprocessing._polynomial/PolynomialFeatures/get_feature_names_out" }, @@ -9726,9 +11468,6 @@ "sklearn/sklearn.preprocessing._polynomial/PolynomialFeatures/powers_@getter": { "target": 
"sklearn/sklearn.preprocessing._polynomial/PolynomialFeatures/powers_@getter" }, - "sklearn/sklearn.preprocessing._polynomial/PolynomialFeatures/transform": { - "target": "sklearn/sklearn.preprocessing._polynomial/PolynomialFeatures/transform" - }, "sklearn/sklearn.preprocessing._polynomial/SplineTransformer/__init__": { "target": "sklearn/sklearn.preprocessing._polynomial/SplineTransformer/__init__" }, @@ -9798,27 +11537,9 @@ "sklearn/sklearn.semi_supervised._label_propagation/BaseLabelPropagation/_get_kernel": { "target": "sklearn/sklearn.semi_supervised._label_propagation/BaseLabelPropagation/_get_kernel" }, - "sklearn/sklearn.semi_supervised._label_propagation/BaseLabelPropagation/fit": { - "target": "sklearn/sklearn.semi_supervised._label_propagation/BaseLabelPropagation/fit" - }, - "sklearn/sklearn.semi_supervised._label_propagation/BaseLabelPropagation/predict": { - "target": "sklearn/sklearn.semi_supervised._label_propagation/BaseLabelPropagation/predict" - }, - "sklearn/sklearn.semi_supervised._label_propagation/BaseLabelPropagation/predict_proba": { - "target": "sklearn/sklearn.semi_supervised._label_propagation/BaseLabelPropagation/predict_proba" - }, - "sklearn/sklearn.semi_supervised._label_propagation/LabelPropagation/__init__": { - "target": "sklearn/sklearn.semi_supervised._label_propagation/LabelPropagation/__init__" - }, "sklearn/sklearn.semi_supervised._label_propagation/LabelPropagation/_build_graph": { "target": "sklearn/sklearn.semi_supervised._label_propagation/LabelPropagation/_build_graph" }, - "sklearn/sklearn.semi_supervised._label_propagation/LabelPropagation/fit": { - "target": "sklearn/sklearn.semi_supervised._label_propagation/LabelPropagation/fit" - }, - "sklearn/sklearn.semi_supervised._label_propagation/LabelSpreading/__init__": { - "target": "sklearn/sklearn.semi_supervised._label_propagation/LabelSpreading/__init__" - }, "sklearn/sklearn.semi_supervised._label_propagation/LabelSpreading/_build_graph": { "target": "sklearn/sklearn.semi_supervised._label_propagation/LabelSpreading/_build_graph" }, @@ -9894,24 +11615,15 @@ "sklearn/sklearn.svm._base/BaseLibSVM/coef_@getter": { "target": "sklearn/sklearn.svm._base/BaseLibSVM/coef_@getter" }, - "sklearn/sklearn.svm._base/BaseLibSVM/fit": { - "target": "sklearn/sklearn.svm._base/BaseLibSVM/fit" - }, "sklearn/sklearn.svm._base/BaseLibSVM/n_support_@getter": { "target": "sklearn/sklearn.svm._base/BaseLibSVM/n_support_@getter" }, - "sklearn/sklearn.svm._base/BaseLibSVM/predict": { - "target": "sklearn/sklearn.svm._base/BaseLibSVM/predict" - }, "sklearn/sklearn.svm._base/BaseSVC/__init__": { "target": "sklearn/sklearn.svm._base/BaseSVC/__init__" }, "sklearn/sklearn.svm._base/BaseSVC/_check_proba": { "target": "sklearn/sklearn.svm._base/BaseSVC/_check_proba" }, - "sklearn/sklearn.svm._base/BaseSVC/_class_weight@getter": { - "target": "sklearn/sklearn.svm._base/BaseSVC/_class_weight@getter" - }, "sklearn/sklearn.svm._base/BaseSVC/_dense_predict_proba": { "target": "sklearn/sklearn.svm._base/BaseSVC/_dense_predict_proba" }, @@ -9924,18 +11636,9 @@ "sklearn/sklearn.svm._base/BaseSVC/_validate_targets": { "target": "sklearn/sklearn.svm._base/BaseSVC/_validate_targets" }, - "sklearn/sklearn.svm._base/BaseSVC/decision_function": { - "target": "sklearn/sklearn.svm._base/BaseSVC/decision_function" - }, - "sklearn/sklearn.svm._base/BaseSVC/predict": { - "target": "sklearn/sklearn.svm._base/BaseSVC/predict" - }, "sklearn/sklearn.svm._base/BaseSVC/predict_log_proba": { "target": 
"sklearn/sklearn.svm._base/BaseSVC/predict_log_proba" }, - "sklearn/sklearn.svm._base/BaseSVC/predict_proba": { - "target": "sklearn/sklearn.svm._base/BaseSVC/predict_proba" - }, "sklearn/sklearn.svm._base/BaseSVC/probA_@getter": { "target": "sklearn/sklearn.svm._base/BaseSVC/probA_@getter" }, @@ -9954,75 +11657,30 @@ "sklearn/sklearn.svm._bounds/l1_min_c": { "target": "sklearn/sklearn.svm._bounds/l1_min_c" }, - "sklearn/sklearn.svm._classes/LinearSVC/__init__": { - "target": "sklearn/sklearn.svm._classes/LinearSVC/__init__" - }, "sklearn/sklearn.svm._classes/LinearSVC/_more_tags": { "target": "sklearn/sklearn.svm._classes/LinearSVC/_more_tags" }, - "sklearn/sklearn.svm._classes/LinearSVC/fit": { - "target": "sklearn/sklearn.svm._classes/LinearSVC/fit" - }, - "sklearn/sklearn.svm._classes/LinearSVR/__init__": { - "target": "sklearn/sklearn.svm._classes/LinearSVR/__init__" - }, "sklearn/sklearn.svm._classes/LinearSVR/_more_tags": { "target": "sklearn/sklearn.svm._classes/LinearSVR/_more_tags" }, - "sklearn/sklearn.svm._classes/LinearSVR/fit": { - "target": "sklearn/sklearn.svm._classes/LinearSVR/fit" - }, - "sklearn/sklearn.svm._classes/NuSVC/__init__": { - "target": "sklearn/sklearn.svm._classes/NuSVC/__init__" - }, "sklearn/sklearn.svm._classes/NuSVC/_more_tags": { "target": "sklearn/sklearn.svm._classes/NuSVC/_more_tags" }, - "sklearn/sklearn.svm._classes/NuSVR/__init__": { - "target": "sklearn/sklearn.svm._classes/NuSVR/__init__" - }, "sklearn/sklearn.svm._classes/NuSVR/_more_tags": { "target": "sklearn/sklearn.svm._classes/NuSVR/_more_tags" }, - "sklearn/sklearn.svm._classes/NuSVR/class_weight_@getter": { - "target": "sklearn/sklearn.svm._classes/NuSVR/class_weight_@getter" - }, - "sklearn/sklearn.svm._classes/OneClassSVM/__init__": { - "target": "sklearn/sklearn.svm._classes/OneClassSVM/__init__" - }, "sklearn/sklearn.svm._classes/OneClassSVM/_more_tags": { "target": "sklearn/sklearn.svm._classes/OneClassSVM/_more_tags" }, - "sklearn/sklearn.svm._classes/OneClassSVM/class_weight_@getter": { - "target": "sklearn/sklearn.svm._classes/OneClassSVM/class_weight_@getter" - }, - "sklearn/sklearn.svm._classes/OneClassSVM/decision_function": { - "target": "sklearn/sklearn.svm._classes/OneClassSVM/decision_function" - }, - "sklearn/sklearn.svm._classes/OneClassSVM/fit": { - "target": "sklearn/sklearn.svm._classes/OneClassSVM/fit" - }, - "sklearn/sklearn.svm._classes/OneClassSVM/predict": { - "target": "sklearn/sklearn.svm._classes/OneClassSVM/predict" - }, "sklearn/sklearn.svm._classes/OneClassSVM/score_samples": { "target": "sklearn/sklearn.svm._classes/OneClassSVM/score_samples" }, - "sklearn/sklearn.svm._classes/SVC/__init__": { - "target": "sklearn/sklearn.svm._classes/SVC/__init__" - }, "sklearn/sklearn.svm._classes/SVC/_more_tags": { "target": "sklearn/sklearn.svm._classes/SVC/_more_tags" }, - "sklearn/sklearn.svm._classes/SVR/__init__": { - "target": "sklearn/sklearn.svm._classes/SVR/__init__" - }, "sklearn/sklearn.svm._classes/SVR/_more_tags": { "target": "sklearn/sklearn.svm._classes/SVR/_more_tags" }, - "sklearn/sklearn.svm._classes/SVR/class_weight_@getter": { - "target": "sklearn/sklearn.svm._classes/SVR/class_weight_@getter" - }, "sklearn/sklearn.svm.setup/configuration": { "target": "sklearn/sklearn.svm.setup/configuration" }, @@ -10035,12 +11693,6 @@ "sklearn/sklearn.tree._classes/BaseDecisionTree/_validate_X_predict": { "target": "sklearn/sklearn.tree._classes/BaseDecisionTree/_validate_X_predict" }, - "sklearn/sklearn.tree._classes/BaseDecisionTree/apply": { - "target": 
"sklearn/sklearn.tree._classes/BaseDecisionTree/apply" - }, - "sklearn/sklearn.tree._classes/BaseDecisionTree/cost_complexity_pruning_path": { - "target": "sklearn/sklearn.tree._classes/BaseDecisionTree/cost_complexity_pruning_path" - }, "sklearn/sklearn.tree._classes/BaseDecisionTree/decision_path": { "target": "sklearn/sklearn.tree._classes/BaseDecisionTree/decision_path" }, @@ -10053,48 +11705,21 @@ "sklearn/sklearn.tree._classes/BaseDecisionTree/get_depth": { "target": "sklearn/sklearn.tree._classes/BaseDecisionTree/get_depth" }, - "sklearn/sklearn.tree._classes/BaseDecisionTree/get_n_leaves": { - "target": "sklearn/sklearn.tree._classes/BaseDecisionTree/get_n_leaves" - }, - "sklearn/sklearn.tree._classes/BaseDecisionTree/predict": { - "target": "sklearn/sklearn.tree._classes/BaseDecisionTree/predict" - }, - "sklearn/sklearn.tree._classes/DecisionTreeClassifier/__init__": { - "target": "sklearn/sklearn.tree._classes/DecisionTreeClassifier/__init__" - }, "sklearn/sklearn.tree._classes/DecisionTreeClassifier/_more_tags": { "target": "sklearn/sklearn.tree._classes/DecisionTreeClassifier/_more_tags" }, - "sklearn/sklearn.tree._classes/DecisionTreeClassifier/fit": { - "target": "sklearn/sklearn.tree._classes/DecisionTreeClassifier/fit" - }, "sklearn/sklearn.tree._classes/DecisionTreeClassifier/n_features_@getter": { "target": "sklearn/sklearn.tree._classes/DecisionTreeClassifier/n_features_@getter" }, "sklearn/sklearn.tree._classes/DecisionTreeClassifier/predict_log_proba": { "target": "sklearn/sklearn.tree._classes/DecisionTreeClassifier/predict_log_proba" }, - "sklearn/sklearn.tree._classes/DecisionTreeClassifier/predict_proba": { - "target": "sklearn/sklearn.tree._classes/DecisionTreeClassifier/predict_proba" - }, - "sklearn/sklearn.tree._classes/DecisionTreeRegressor/__init__": { - "target": "sklearn/sklearn.tree._classes/DecisionTreeRegressor/__init__" - }, "sklearn/sklearn.tree._classes/DecisionTreeRegressor/_compute_partial_dependence_recursion": { "target": "sklearn/sklearn.tree._classes/DecisionTreeRegressor/_compute_partial_dependence_recursion" }, - "sklearn/sklearn.tree._classes/DecisionTreeRegressor/fit": { - "target": "sklearn/sklearn.tree._classes/DecisionTreeRegressor/fit" - }, "sklearn/sklearn.tree._classes/DecisionTreeRegressor/n_features_@getter": { "target": "sklearn/sklearn.tree._classes/DecisionTreeRegressor/n_features_@getter" }, - "sklearn/sklearn.tree._classes/ExtraTreeClassifier/__init__": { - "target": "sklearn/sklearn.tree._classes/ExtraTreeClassifier/__init__" - }, - "sklearn/sklearn.tree._classes/ExtraTreeRegressor/__init__": { - "target": "sklearn/sklearn.tree._classes/ExtraTreeRegressor/__init__" - }, "sklearn/sklearn.tree._export/Sentinel/__repr__": { "target": "sklearn/sklearn.tree._export/Sentinel/__repr__" }, @@ -10306,139 +11931,49 @@ "target": "sklearn/sklearn.utils._mocking/CheckingClassifier/decision_function" }, "sklearn/sklearn.utils._mocking/CheckingClassifier/fit": { - "target": "sklearn/sklearn.utils._mocking/CheckingClassifier/fit" - }, - "sklearn/sklearn.utils._mocking/CheckingClassifier/predict": { - "target": "sklearn/sklearn.utils._mocking/CheckingClassifier/predict" - }, - "sklearn/sklearn.utils._mocking/CheckingClassifier/predict_proba": { - "target": "sklearn/sklearn.utils._mocking/CheckingClassifier/predict_proba" - }, - "sklearn/sklearn.utils._mocking/CheckingClassifier/score": { - "target": "sklearn/sklearn.utils._mocking/CheckingClassifier/score" - }, - "sklearn/sklearn.utils._mocking/MockDataFrame/__array__": { - "target": 
"sklearn/sklearn.utils._mocking/MockDataFrame/__array__" - }, - "sklearn/sklearn.utils._mocking/MockDataFrame/__eq__": { - "target": "sklearn/sklearn.utils._mocking/MockDataFrame/__eq__" - }, - "sklearn/sklearn.utils._mocking/MockDataFrame/__init__": { - "target": "sklearn/sklearn.utils._mocking/MockDataFrame/__init__" - }, - "sklearn/sklearn.utils._mocking/MockDataFrame/__len__": { - "target": "sklearn/sklearn.utils._mocking/MockDataFrame/__len__" - }, - "sklearn/sklearn.utils._mocking/MockDataFrame/__ne__": { - "target": "sklearn/sklearn.utils._mocking/MockDataFrame/__ne__" - }, - "sklearn/sklearn.utils._mocking/MockDataFrame/take": { - "target": "sklearn/sklearn.utils._mocking/MockDataFrame/take" - }, - "sklearn/sklearn.utils._mocking/NoSampleWeightWrapper/__init__": { - "target": "sklearn/sklearn.utils._mocking/NoSampleWeightWrapper/__init__" - }, - "sklearn/sklearn.utils._mocking/NoSampleWeightWrapper/_more_tags": { - "target": "sklearn/sklearn.utils._mocking/NoSampleWeightWrapper/_more_tags" - }, - "sklearn/sklearn.utils._mocking/NoSampleWeightWrapper/fit": { - "target": "sklearn/sklearn.utils._mocking/NoSampleWeightWrapper/fit" - }, - "sklearn/sklearn.utils._mocking/NoSampleWeightWrapper/predict": { - "target": "sklearn/sklearn.utils._mocking/NoSampleWeightWrapper/predict" - }, - "sklearn/sklearn.utils._mocking/NoSampleWeightWrapper/predict_proba": { - "target": "sklearn/sklearn.utils._mocking/NoSampleWeightWrapper/predict_proba" - }, - "sklearn/sklearn.utils._param_validation/Interval/__contains__": { - "target": "sklearn/sklearn.utils._param_validation/Interval/__contains__" - }, - "sklearn/sklearn.utils._param_validation/Interval/__init__": { - "target": "sklearn/sklearn.utils._param_validation/Interval/__init__" - }, - "sklearn/sklearn.utils._param_validation/Interval/__str__": { - "target": "sklearn/sklearn.utils._param_validation/Interval/__str__" - }, - "sklearn/sklearn.utils._param_validation/Interval/_check_params": { - "target": "sklearn/sklearn.utils._param_validation/Interval/_check_params" - }, - "sklearn/sklearn.utils._param_validation/Interval/is_satisfied_by": { - "target": "sklearn/sklearn.utils._param_validation/Interval/is_satisfied_by" - }, - "sklearn/sklearn.utils._param_validation/StrOptions/__init__": { - "target": "sklearn/sklearn.utils._param_validation/StrOptions/__init__" - }, - "sklearn/sklearn.utils._param_validation/StrOptions/__str__": { - "target": "sklearn/sklearn.utils._param_validation/StrOptions/__str__" - }, - "sklearn/sklearn.utils._param_validation/StrOptions/_mark_if_deprecated": { - "target": "sklearn/sklearn.utils._param_validation/StrOptions/_mark_if_deprecated" - }, - "sklearn/sklearn.utils._param_validation/StrOptions/is_satisfied_by": { - "target": "sklearn/sklearn.utils._param_validation/StrOptions/is_satisfied_by" - }, - "sklearn/sklearn.utils._param_validation/_ArrayLikes/__str__": { - "target": "sklearn/sklearn.utils._param_validation/_ArrayLikes/__str__" - }, - "sklearn/sklearn.utils._param_validation/_ArrayLikes/is_satisfied_by": { - "target": "sklearn/sklearn.utils._param_validation/_ArrayLikes/is_satisfied_by" - }, - "sklearn/sklearn.utils._param_validation/_Callables/__str__": { - "target": "sklearn/sklearn.utils._param_validation/_Callables/__str__" - }, - "sklearn/sklearn.utils._param_validation/_Callables/is_satisfied_by": { - "target": "sklearn/sklearn.utils._param_validation/_Callables/is_satisfied_by" - }, - "sklearn/sklearn.utils._param_validation/_Constraint/__str__": { - "target": 
"sklearn/sklearn.utils._param_validation/_Constraint/__str__" - }, - "sklearn/sklearn.utils._param_validation/_Constraint/is_satisfied_by": { - "target": "sklearn/sklearn.utils._param_validation/_Constraint/is_satisfied_by" - }, - "sklearn/sklearn.utils._param_validation/_InstancesOf/__init__": { - "target": "sklearn/sklearn.utils._param_validation/_InstancesOf/__init__" + "target": "sklearn/sklearn.utils._mocking/CheckingClassifier/fit" }, - "sklearn/sklearn.utils._param_validation/_InstancesOf/__str__": { - "target": "sklearn/sklearn.utils._param_validation/_InstancesOf/__str__" + "sklearn/sklearn.utils._mocking/CheckingClassifier/predict": { + "target": "sklearn/sklearn.utils._mocking/CheckingClassifier/predict" }, - "sklearn/sklearn.utils._param_validation/_InstancesOf/_type_name": { - "target": "sklearn/sklearn.utils._param_validation/_InstancesOf/_type_name" + "sklearn/sklearn.utils._mocking/CheckingClassifier/predict_proba": { + "target": "sklearn/sklearn.utils._mocking/CheckingClassifier/predict_proba" }, - "sklearn/sklearn.utils._param_validation/_InstancesOf/is_satisfied_by": { - "target": "sklearn/sklearn.utils._param_validation/_InstancesOf/is_satisfied_by" + "sklearn/sklearn.utils._mocking/CheckingClassifier/score": { + "target": "sklearn/sklearn.utils._mocking/CheckingClassifier/score" }, - "sklearn/sklearn.utils._param_validation/_NoneConstraint/__str__": { - "target": "sklearn/sklearn.utils._param_validation/_NoneConstraint/__str__" + "sklearn/sklearn.utils._mocking/MockDataFrame/__array__": { + "target": "sklearn/sklearn.utils._mocking/MockDataFrame/__array__" }, - "sklearn/sklearn.utils._param_validation/_NoneConstraint/is_satisfied_by": { - "target": "sklearn/sklearn.utils._param_validation/_NoneConstraint/is_satisfied_by" + "sklearn/sklearn.utils._mocking/MockDataFrame/__eq__": { + "target": "sklearn/sklearn.utils._mocking/MockDataFrame/__eq__" }, - "sklearn/sklearn.utils._param_validation/_RandomStates/__init__": { - "target": "sklearn/sklearn.utils._param_validation/_RandomStates/__init__" + "sklearn/sklearn.utils._mocking/MockDataFrame/__init__": { + "target": "sklearn/sklearn.utils._mocking/MockDataFrame/__init__" }, - "sklearn/sklearn.utils._param_validation/_RandomStates/__str__": { - "target": "sklearn/sklearn.utils._param_validation/_RandomStates/__str__" + "sklearn/sklearn.utils._mocking/MockDataFrame/__len__": { + "target": "sklearn/sklearn.utils._mocking/MockDataFrame/__len__" }, - "sklearn/sklearn.utils._param_validation/_RandomStates/is_satisfied_by": { - "target": "sklearn/sklearn.utils._param_validation/_RandomStates/is_satisfied_by" + "sklearn/sklearn.utils._mocking/MockDataFrame/__ne__": { + "target": "sklearn/sklearn.utils._mocking/MockDataFrame/__ne__" }, - "sklearn/sklearn.utils._param_validation/_SparseMatrices/__str__": { - "target": "sklearn/sklearn.utils._param_validation/_SparseMatrices/__str__" + "sklearn/sklearn.utils._mocking/MockDataFrame/take": { + "target": "sklearn/sklearn.utils._mocking/MockDataFrame/take" }, - "sklearn/sklearn.utils._param_validation/_SparseMatrices/is_satisfied_by": { - "target": "sklearn/sklearn.utils._param_validation/_SparseMatrices/is_satisfied_by" + "sklearn/sklearn.utils._mocking/NoSampleWeightWrapper/__init__": { + "target": "sklearn/sklearn.utils._mocking/NoSampleWeightWrapper/__init__" }, - "sklearn/sklearn.utils._param_validation/generate_invalid_param_val": { - "target": "sklearn/sklearn.utils._param_validation/generate_invalid_param_val" + "sklearn/sklearn.utils._mocking/NoSampleWeightWrapper/_more_tags": { 
+ "target": "sklearn/sklearn.utils._mocking/NoSampleWeightWrapper/_more_tags" }, - "sklearn/sklearn.utils._param_validation/make_constraint": { - "target": "sklearn/sklearn.utils._param_validation/make_constraint" + "sklearn/sklearn.utils._mocking/NoSampleWeightWrapper/fit": { + "target": "sklearn/sklearn.utils._mocking/NoSampleWeightWrapper/fit" }, - "sklearn/sklearn.utils._param_validation/validate_parameter_constraints": { - "target": "sklearn/sklearn.utils._param_validation/validate_parameter_constraints" + "sklearn/sklearn.utils._mocking/NoSampleWeightWrapper/predict": { + "target": "sklearn/sklearn.utils._mocking/NoSampleWeightWrapper/predict" }, - "sklearn/sklearn.utils._param_validation/validate_params": { - "target": "sklearn/sklearn.utils._param_validation/validate_params" + "sklearn/sklearn.utils._mocking/NoSampleWeightWrapper/predict_proba": { + "target": "sklearn/sklearn.utils._mocking/NoSampleWeightWrapper/predict_proba" }, "sklearn/sklearn.utils._pprint/KeyValTuple/__repr__": { "target": "sklearn/sklearn.utils._pprint/KeyValTuple/__repr__" @@ -10614,9 +12149,6 @@ "sklearn/sklearn.utils._testing/create_memmap_backed_data": { "target": "sklearn/sklearn.utils._testing/create_memmap_backed_data" }, - "sklearn/sklearn.utils._testing/ignore_warnings": { - "target": "sklearn/sklearn.utils._testing/ignore_warnings" - }, "sklearn/sklearn.utils._testing/raises": { "target": "sklearn/sklearn.utils._testing/raises" }, @@ -10881,9 +12413,6 @@ "sklearn/sklearn.utils.estimator_checks/check_outliers_train": { "target": "sklearn/sklearn.utils.estimator_checks/check_outliers_train" }, - "sklearn/sklearn.utils.estimator_checks/check_param_validation": { - "target": "sklearn/sklearn.utils.estimator_checks/check_param_validation" - }, "sklearn/sklearn.utils.estimator_checks/check_parameters_default_constructible": { "target": "sklearn/sklearn.utils.estimator_checks/check_parameters_default_constructible" }, @@ -11055,18 +12584,12 @@ "sklearn/sklearn.utils.metaestimators/_BaseComposition/_set_params": { "target": "sklearn/sklearn.utils.metaestimators/_BaseComposition/_set_params" }, - "sklearn/sklearn.utils.metaestimators/_BaseComposition/_validate_names": { - "target": "sklearn/sklearn.utils.metaestimators/_BaseComposition/_validate_names" - }, "sklearn/sklearn.utils.metaestimators/_IffHasAttrDescriptor/__init__": { "target": "sklearn/sklearn.utils.metaestimators/_IffHasAttrDescriptor/__init__" }, "sklearn/sklearn.utils.metaestimators/_IffHasAttrDescriptor/_check": { "target": "sklearn/sklearn.utils.metaestimators/_IffHasAttrDescriptor/_check" }, - "sklearn/sklearn.utils.metaestimators/_safe_split": { - "target": "sklearn/sklearn.utils.metaestimators/_safe_split" - }, "sklearn/sklearn.utils.metaestimators/available_if": { "target": "sklearn/sklearn.utils.metaestimators/available_if" }, @@ -11235,9 +12758,6 @@ "sklearn/sklearn.utils.validation/_num_features": { "target": "sklearn/sklearn.utils.validation/_num_features" }, - "sklearn/sklearn.utils.validation/_num_samples": { - "target": "sklearn/sklearn.utils.validation/_num_samples" - }, "sklearn/sklearn.utils.validation/_pandas_dtype_needs_early_conversion": { "target": "sklearn/sklearn.utils.validation/_pandas_dtype_needs_early_conversion" }, @@ -11342,21 +12862,111 @@ "sklearn/sklearn.calibration/CalibratedClassifierCV/__init__/base_estimator": { "target": "sklearn/sklearn.calibration/CalibratedClassifierCV/__init__/base_estimator" }, + "sklearn/sklearn.cluster._agglomerative/AgglomerativeClustering/__init__/n_clusters": { + "target": 
"sklearn/sklearn.cluster._agglomerative/AgglomerativeClustering/__init__/n_clusters" + }, + "sklearn/sklearn.cluster._agglomerative/AgglomerativeClustering/fit/y": { + "target": "sklearn/sklearn.cluster._agglomerative/AgglomerativeClustering/fit/y" + }, + "sklearn/sklearn.cluster._agglomerative/FeatureAgglomeration/__init__/n_clusters": { + "target": "sklearn/sklearn.cluster._agglomerative/FeatureAgglomeration/__init__/n_clusters" + }, + "sklearn/sklearn.cluster._agglomerative/FeatureAgglomeration/__init__/connectivity": { + "target": "sklearn/sklearn.cluster._agglomerative/FeatureAgglomeration/__init__/connectivity" + }, + "sklearn/sklearn.cluster._birch/Birch/__init__/branching_factor": { + "target": "sklearn/sklearn.cluster._birch/Birch/__init__/branching_factor" + }, + "sklearn/sklearn.cluster._birch/Birch/__init__/n_clusters": { + "target": "sklearn/sklearn.cluster._birch/Birch/__init__/n_clusters" + }, + "sklearn/sklearn.cluster._dbscan/DBSCAN/__init__/eps": { + "target": "sklearn/sklearn.cluster._dbscan/DBSCAN/__init__/eps" + }, + "sklearn/sklearn.cluster._kmeans/KMeans/__init__/n_clusters": { + "target": "sklearn/sklearn.cluster._kmeans/KMeans/__init__/n_clusters" + }, + "sklearn/sklearn.cluster._kmeans/KMeans/fit/y": { + "target": "sklearn/sklearn.cluster._kmeans/KMeans/fit/y" + }, + "sklearn/sklearn.cluster._kmeans/_BaseKMeans/fit_transform/y": { + "target": "sklearn/sklearn.cluster._kmeans/_BaseKMeans/fit_transform/y" + }, "sklearn/sklearn.cluster._kmeans/k_means/max_iter": { "target": "sklearn/sklearn.cluster._kmeans/k_means/max_iter" }, "sklearn/sklearn.cluster._kmeans/k_means/random_state": { "target": "sklearn/sklearn.cluster._kmeans/k_means/random_state" }, + "sklearn/sklearn.cluster._mean_shift/MeanShift/__init__/bandwidth": { + "target": "sklearn/sklearn.cluster._mean_shift/MeanShift/__init__/bandwidth" + }, + "sklearn/sklearn.cluster._mean_shift/MeanShift/__init__/min_bin_freq": { + "target": "sklearn/sklearn.cluster._mean_shift/MeanShift/__init__/min_bin_freq" + }, "sklearn/sklearn.cluster._mean_shift/estimate_bandwidth/quantile": { "target": "sklearn/sklearn.cluster._mean_shift/estimate_bandwidth/quantile" }, "sklearn/sklearn.cluster._mean_shift/estimate_bandwidth/n_samples": { "target": "sklearn/sklearn.cluster._mean_shift/estimate_bandwidth/n_samples" }, + "sklearn/sklearn.cluster._optics/OPTICS/__init__/min_samples": { + "target": "sklearn/sklearn.cluster._optics/OPTICS/__init__/min_samples" + }, + "sklearn/sklearn.cluster._optics/OPTICS/__init__/max_eps": { + "target": "sklearn/sklearn.cluster._optics/OPTICS/__init__/max_eps" + }, + "sklearn/sklearn.cluster._optics/OPTICS/__init__/metric": { + "target": "sklearn/sklearn.cluster._optics/OPTICS/__init__/metric" + }, + "sklearn/sklearn.cluster._optics/OPTICS/__init__/xi": { + "target": "sklearn/sklearn.cluster._optics/OPTICS/__init__/xi" + }, + "sklearn/sklearn.cluster._spectral/SpectralClustering/__init__/n_clusters": { + "target": "sklearn/sklearn.cluster._spectral/SpectralClustering/__init__/n_clusters" + }, + "sklearn/sklearn.compose._column_transformer/ColumnTransformer/__init__/remainder": { + "target": "sklearn/sklearn.compose._column_transformer/ColumnTransformer/__init__/remainder" + }, + "sklearn/sklearn.compose._column_transformer/ColumnTransformer/__init__/transformer_weights": { + "target": "sklearn/sklearn.compose._column_transformer/ColumnTransformer/__init__/transformer_weights" + }, + "sklearn/sklearn.compose._column_transformer/ColumnTransformer/fit/y": { + "target": 
"sklearn/sklearn.compose._column_transformer/ColumnTransformer/fit/y" + }, + "sklearn/sklearn.compose._column_transformer/ColumnTransformer/fit_transform/y": { + "target": "sklearn/sklearn.compose._column_transformer/ColumnTransformer/fit_transform/y" + }, + "sklearn/sklearn.compose._column_transformer/make_column_selector/__init__/dtype_include": { + "target": "sklearn/sklearn.compose._column_transformer/make_column_selector/__init__/dtype_include" + }, "sklearn/sklearn.compose._column_transformer/make_column_transformer/remainder": { "target": "sklearn/sklearn.compose._column_transformer/make_column_transformer/remainder" }, + "sklearn/sklearn.compose._target/TransformedTargetRegressor/__init__/regressor": { + "target": "sklearn/sklearn.compose._target/TransformedTargetRegressor/__init__/regressor" + }, + "sklearn/sklearn.compose._target/TransformedTargetRegressor/__init__/transformer": { + "target": "sklearn/sklearn.compose._target/TransformedTargetRegressor/__init__/transformer" + }, + "sklearn/sklearn.compose._target/TransformedTargetRegressor/__init__/func": { + "target": "sklearn/sklearn.compose._target/TransformedTargetRegressor/__init__/func" + }, + "sklearn/sklearn.compose._target/TransformedTargetRegressor/__init__/inverse_func": { + "target": "sklearn/sklearn.compose._target/TransformedTargetRegressor/__init__/inverse_func" + }, + "sklearn/sklearn.covariance._elliptic_envelope/EllipticEnvelope/__init__/contamination": { + "target": "sklearn/sklearn.covariance._elliptic_envelope/EllipticEnvelope/__init__/contamination" + }, + "sklearn/sklearn.covariance._shrunk_covariance/ShrunkCovariance/__init__/shrinkage": { + "target": "sklearn/sklearn.covariance._shrunk_covariance/ShrunkCovariance/__init__/shrinkage" + }, + "sklearn/sklearn.cross_decomposition._pls/PLSRegression/__init__/n_components": { + "target": "sklearn/sklearn.cross_decomposition._pls/PLSRegression/__init__/n_components" + }, + "sklearn/sklearn.cross_decomposition._pls/_PLS/transform/Y": { + "target": "sklearn/sklearn.cross_decomposition._pls/_PLS/transform/Y" + }, "sklearn/sklearn.datasets._base/load_breast_cancer/return_X_y": { "target": "sklearn/sklearn.datasets._base/load_breast_cancer/return_X_y" }, @@ -11432,6 +13042,57 @@ "sklearn/sklearn.datasets._twenty_newsgroups/fetch_20newsgroups/subset": { "target": "sklearn/sklearn.datasets._twenty_newsgroups/fetch_20newsgroups/subset" }, + "sklearn/sklearn.decomposition._factor_analysis/FactorAnalysis/__init__/n_components": { + "target": "sklearn/sklearn.decomposition._factor_analysis/FactorAnalysis/__init__/n_components" + }, + "sklearn/sklearn.decomposition._factor_analysis/FactorAnalysis/__init__/random_state": { + "target": "sklearn/sklearn.decomposition._factor_analysis/FactorAnalysis/__init__/random_state" + }, + "sklearn/sklearn.decomposition._fastica/FastICA/__init__/n_components": { + "target": "sklearn/sklearn.decomposition._fastica/FastICA/__init__/n_components" + }, + "sklearn/sklearn.decomposition._fastica/FastICA/__init__/random_state": { + "target": "sklearn/sklearn.decomposition._fastica/FastICA/__init__/random_state" + }, + "sklearn/sklearn.decomposition._kernel_pca/KernelPCA/__init__/fit_inverse_transform": { + "target": "sklearn/sklearn.decomposition._kernel_pca/KernelPCA/__init__/fit_inverse_transform" + }, + "sklearn/sklearn.decomposition._kernel_pca/KernelPCA/fit_transform/y": { + "target": "sklearn/sklearn.decomposition._kernel_pca/KernelPCA/fit_transform/y" + }, + 
"sklearn/sklearn.decomposition._lda/LatentDirichletAllocation/__init__/learning_method": { + "target": "sklearn/sklearn.decomposition._lda/LatentDirichletAllocation/__init__/learning_method" + }, + "sklearn/sklearn.decomposition._lda/LatentDirichletAllocation/__init__/learning_offset": { + "target": "sklearn/sklearn.decomposition._lda/LatentDirichletAllocation/__init__/learning_offset" + }, + "sklearn/sklearn.decomposition._lda/LatentDirichletAllocation/__init__/max_iter": { + "target": "sklearn/sklearn.decomposition._lda/LatentDirichletAllocation/__init__/max_iter" + }, + "sklearn/sklearn.decomposition._lda/LatentDirichletAllocation/fit/y": { + "target": "sklearn/sklearn.decomposition._lda/LatentDirichletAllocation/fit/y" + }, + "sklearn/sklearn.decomposition._nmf/NMF/__init__/n_components": { + "target": "sklearn/sklearn.decomposition._nmf/NMF/__init__/n_components" + }, + "sklearn/sklearn.decomposition._pca/PCA/__init__/n_components": { + "target": "sklearn/sklearn.decomposition._pca/PCA/__init__/n_components" + }, + "sklearn/sklearn.decomposition._pca/PCA/fit/y": { + "target": "sklearn/sklearn.decomposition._pca/PCA/fit/y" + }, + "sklearn/sklearn.decomposition._pca/PCA/fit_transform/y": { + "target": "sklearn/sklearn.decomposition._pca/PCA/fit_transform/y" + }, + "sklearn/sklearn.decomposition._truncated_svd/TruncatedSVD/__init__/n_components": { + "target": "sklearn/sklearn.decomposition._truncated_svd/TruncatedSVD/__init__/n_components" + }, + "sklearn/sklearn.decomposition._truncated_svd/TruncatedSVD/fit/y": { + "target": "sklearn/sklearn.decomposition._truncated_svd/TruncatedSVD/fit/y" + }, + "sklearn/sklearn.decomposition._truncated_svd/TruncatedSVD/fit_transform/y": { + "target": "sklearn/sklearn.decomposition._truncated_svd/TruncatedSVD/fit_transform/y" + }, "sklearn/sklearn.dummy/DummyClassifier/__init__/strategy": { "target": "sklearn/sklearn.dummy/DummyClassifier/__init__/strategy" }, @@ -11441,6 +13102,81 @@ "sklearn/sklearn.dummy/DummyRegressor/__init__/constant": { "target": "sklearn/sklearn.dummy/DummyRegressor/__init__/constant" }, + "sklearn/sklearn.ensemble._bagging/BaggingClassifier/__init__/base_estimator": { + "target": "sklearn/sklearn.ensemble._bagging/BaggingClassifier/__init__/base_estimator" + }, + "sklearn/sklearn.ensemble._bagging/BaggingRegressor/__init__/base_estimator": { + "target": "sklearn/sklearn.ensemble._bagging/BaggingRegressor/__init__/base_estimator" + }, + "sklearn/sklearn.ensemble._bagging/BaseBagging/fit/sample_weight": { + "target": "sklearn/sklearn.ensemble._bagging/BaseBagging/fit/sample_weight" + }, + "sklearn/sklearn.ensemble._forest/BaseForest/fit/sample_weight": { + "target": "sklearn/sklearn.ensemble._forest/BaseForest/fit/sample_weight" + }, + "sklearn/sklearn.ensemble._forest/ExtraTreesRegressor/__init__/n_jobs": { + "target": "sklearn/sklearn.ensemble._forest/ExtraTreesRegressor/__init__/n_jobs" + }, + "sklearn/sklearn.ensemble._forest/RandomTreesEmbedding/__init__/n_estimators": { + "target": "sklearn/sklearn.ensemble._forest/RandomTreesEmbedding/__init__/n_estimators" + }, + "sklearn/sklearn.ensemble._forest/RandomTreesEmbedding/__init__/max_depth": { + "target": "sklearn/sklearn.ensemble._forest/RandomTreesEmbedding/__init__/max_depth" + }, + "sklearn/sklearn.ensemble._forest/RandomTreesEmbedding/__init__/min_samples_split": { + "target": "sklearn/sklearn.ensemble._forest/RandomTreesEmbedding/__init__/min_samples_split" + }, + "sklearn/sklearn.ensemble._forest/RandomTreesEmbedding/__init__/n_jobs": { + "target": 
"sklearn/sklearn.ensemble._forest/RandomTreesEmbedding/__init__/n_jobs" + }, + "sklearn/sklearn.ensemble._forest/RandomTreesEmbedding/__init__/random_state": { + "target": "sklearn/sklearn.ensemble._forest/RandomTreesEmbedding/__init__/random_state" + }, + "sklearn/sklearn.ensemble._gb/BaseGradientBoosting/fit/sample_weight": { + "target": "sklearn/sklearn.ensemble._gb/BaseGradientBoosting/fit/sample_weight" + }, + "sklearn/sklearn.ensemble._gb/GradientBoostingClassifier/__init__/ccp_alpha": { + "target": "sklearn/sklearn.ensemble._gb/GradientBoostingClassifier/__init__/ccp_alpha" + }, + "sklearn/sklearn.ensemble._gb/GradientBoostingRegressor/__init__/init": { + "target": "sklearn/sklearn.ensemble._gb/GradientBoostingRegressor/__init__/init" + }, + "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/HistGradientBoostingClassifier/__init__/scoring": { + "target": "sklearn/sklearn.ensemble._hist_gradient_boosting.gradient_boosting/HistGradientBoostingClassifier/__init__/scoring" + }, + "sklearn/sklearn.ensemble._iforest/IsolationForest/fit/y": { + "target": "sklearn/sklearn.ensemble._iforest/IsolationForest/fit/y" + }, + "sklearn/sklearn.ensemble._stacking/StackingClassifier/__init__/final_estimator": { + "target": "sklearn/sklearn.ensemble._stacking/StackingClassifier/__init__/final_estimator" + }, + "sklearn/sklearn.ensemble._stacking/StackingRegressor/__init__/final_estimator": { + "target": "sklearn/sklearn.ensemble._stacking/StackingRegressor/__init__/final_estimator" + }, + "sklearn/sklearn.ensemble._voting/VotingClassifier/__init__/voting": { + "target": "sklearn/sklearn.ensemble._voting/VotingClassifier/__init__/voting" + }, + "sklearn/sklearn.ensemble._voting/VotingClassifier/__init__/weights": { + "target": "sklearn/sklearn.ensemble._voting/VotingClassifier/__init__/weights" + }, + "sklearn/sklearn.ensemble._voting/VotingRegressor/__init__/weights": { + "target": "sklearn/sklearn.ensemble._voting/VotingRegressor/__init__/weights" + }, + "sklearn/sklearn.ensemble._weight_boosting/AdaBoostClassifier/__init__/base_estimator": { + "target": "sklearn/sklearn.ensemble._weight_boosting/AdaBoostClassifier/__init__/base_estimator" + }, + "sklearn/sklearn.ensemble._weight_boosting/AdaBoostClassifier/fit/sample_weight": { + "target": "sklearn/sklearn.ensemble._weight_boosting/AdaBoostClassifier/fit/sample_weight" + }, + "sklearn/sklearn.ensemble._weight_boosting/AdaBoostRegressor/__init__/base_estimator": { + "target": "sklearn/sklearn.ensemble._weight_boosting/AdaBoostRegressor/__init__/base_estimator" + }, + "sklearn/sklearn.feature_extraction._hash/FeatureHasher/__init__/n_features": { + "target": "sklearn/sklearn.feature_extraction._hash/FeatureHasher/__init__/n_features" + }, + "sklearn/sklearn.feature_extraction._hash/FeatureHasher/__init__/dtype": { + "target": "sklearn/sklearn.feature_extraction._hash/FeatureHasher/__init__/dtype" + }, "sklearn/sklearn.feature_extraction.image/PatchExtractor/__init__/patch_size": { "target": "sklearn/sklearn.feature_extraction.image/PatchExtractor/__init__/patch_size" }, @@ -11513,15 +13249,81 @@ "sklearn/sklearn.feature_extraction.text/TfidfVectorizer/fit_transform/y": { "target": "sklearn/sklearn.feature_extraction.text/TfidfVectorizer/fit_transform/y" }, + "sklearn/sklearn.feature_selection._from_model/SelectFromModel/__init__/prefit": { + "target": "sklearn/sklearn.feature_selection._from_model/SelectFromModel/__init__/prefit" + }, + "sklearn/sklearn.feature_selection._from_model/SelectFromModel/fit/y": { + "target": 
"sklearn/sklearn.feature_selection._from_model/SelectFromModel/fit/y" + }, "sklearn/sklearn.feature_selection._mutual_info/mutual_info_regression/discrete_features": { "target": "sklearn/sklearn.feature_selection._mutual_info/mutual_info_regression/discrete_features" }, + "sklearn/sklearn.feature_selection._rfe/RFECV/__init__/cv": { + "target": "sklearn/sklearn.feature_selection._rfe/RFECV/__init__/cv" + }, + "sklearn/sklearn.feature_selection._rfe/RFECV/__init__/scoring": { + "target": "sklearn/sklearn.feature_selection._rfe/RFECV/__init__/scoring" + }, + "sklearn/sklearn.feature_selection._univariate_selection/GenericUnivariateSelect/__init__/score_func": { + "target": "sklearn/sklearn.feature_selection._univariate_selection/GenericUnivariateSelect/__init__/score_func" + }, + "sklearn/sklearn.feature_selection._univariate_selection/SelectFpr/__init__/score_func": { + "target": "sklearn/sklearn.feature_selection._univariate_selection/SelectFpr/__init__/score_func" + }, + "sklearn/sklearn.feature_selection._univariate_selection/SelectFpr/__init__/alpha": { + "target": "sklearn/sklearn.feature_selection._univariate_selection/SelectFpr/__init__/alpha" + }, + "sklearn/sklearn.feature_selection._univariate_selection/SelectKBest/__init__/score_func": { + "target": "sklearn/sklearn.feature_selection._univariate_selection/SelectKBest/__init__/score_func" + }, + "sklearn/sklearn.feature_selection._univariate_selection/SelectKBest/__init__/k": { + "target": "sklearn/sklearn.feature_selection._univariate_selection/SelectKBest/__init__/k" + }, + "sklearn/sklearn.feature_selection._univariate_selection/SelectPercentile/__init__/score_func": { + "target": "sklearn/sklearn.feature_selection._univariate_selection/SelectPercentile/__init__/score_func" + }, + "sklearn/sklearn.feature_selection._univariate_selection/SelectPercentile/__init__/percentile": { + "target": "sklearn/sklearn.feature_selection._univariate_selection/SelectPercentile/__init__/percentile" + }, + "sklearn/sklearn.feature_selection._variance_threshold/VarianceThreshold/fit/y": { + "target": "sklearn/sklearn.feature_selection._variance_threshold/VarianceThreshold/fit/y" + }, + "sklearn/sklearn.gaussian_process._gpc/GaussianProcessClassifier/__init__/kernel": { + "target": "sklearn/sklearn.gaussian_process._gpc/GaussianProcessClassifier/__init__/kernel" + }, + "sklearn/sklearn.gaussian_process._gpr/GaussianProcessRegressor/__init__/kernel": { + "target": "sklearn/sklearn.gaussian_process._gpr/GaussianProcessRegressor/__init__/kernel" + }, "sklearn/sklearn.gaussian_process.kernels/RBF/__init__/length_scale_bounds": { "target": "sklearn/sklearn.gaussian_process.kernels/RBF/__init__/length_scale_bounds" }, "sklearn/sklearn.gaussian_process.kernels/WhiteKernel/__init__/noise_level": { "target": "sklearn/sklearn.gaussian_process.kernels/WhiteKernel/__init__/noise_level" }, + "sklearn/sklearn.impute._base/MissingIndicator/fit/y": { + "target": "sklearn/sklearn.impute._base/MissingIndicator/fit/y" + }, + "sklearn/sklearn.impute._base/SimpleImputer/__init__/missing_values": { + "target": "sklearn/sklearn.impute._base/SimpleImputer/__init__/missing_values" + }, + "sklearn/sklearn.impute._base/SimpleImputer/__init__/strategy": { + "target": "sklearn/sklearn.impute._base/SimpleImputer/__init__/strategy" + }, + "sklearn/sklearn.impute._base/SimpleImputer/fit/y": { + "target": "sklearn/sklearn.impute._base/SimpleImputer/fit/y" + }, + "sklearn/sklearn.impute._iterative/IterativeImputer/__init__/estimator": { + "target": 
"sklearn/sklearn.impute._iterative/IterativeImputer/__init__/estimator" + }, + "sklearn/sklearn.impute._iterative/IterativeImputer/__init__/missing_values": { + "target": "sklearn/sklearn.impute._iterative/IterativeImputer/__init__/missing_values" + }, + "sklearn/sklearn.impute._iterative/IterativeImputer/__init__/min_value": { + "target": "sklearn/sklearn.impute._iterative/IterativeImputer/__init__/min_value" + }, + "sklearn/sklearn.impute._iterative/IterativeImputer/__init__/max_value": { + "target": "sklearn/sklearn.impute._iterative/IterativeImputer/__init__/max_value" + }, "sklearn/sklearn.inspection._permutation_importance/permutation_importance/scoring": { "target": "sklearn/sklearn.inspection._permutation_importance/permutation_importance/scoring" }, @@ -11549,6 +13351,108 @@ "sklearn/sklearn.kernel_ridge/KernelRidge/__init__/coef0": { "target": "sklearn/sklearn.kernel_ridge/KernelRidge/__init__/coef0" }, + "sklearn/sklearn.linear_model._base/LinearRegression/fit/sample_weight": { + "target": "sklearn/sklearn.linear_model._base/LinearRegression/fit/sample_weight" + }, + "sklearn/sklearn.linear_model._bayes/ARDRegression/predict/return_std": { + "target": "sklearn/sklearn.linear_model._bayes/ARDRegression/predict/return_std" + }, + "sklearn/sklearn.linear_model._coordinate_descent/ElasticNet/fit/sample_weight": { + "target": "sklearn/sklearn.linear_model._coordinate_descent/ElasticNet/fit/sample_weight" + }, + "sklearn/sklearn.linear_model._coordinate_descent/ElasticNetCV/__init__/alphas": { + "target": "sklearn/sklearn.linear_model._coordinate_descent/ElasticNetCV/__init__/alphas" + }, + "sklearn/sklearn.linear_model._coordinate_descent/LassoCV/__init__/alphas": { + "target": "sklearn/sklearn.linear_model._coordinate_descent/LassoCV/__init__/alphas" + }, + "sklearn/sklearn.linear_model._glm.glm/TweedieRegressor/__init__/link": { + "target": "sklearn/sklearn.linear_model._glm.glm/TweedieRegressor/__init__/link" + }, + "sklearn/sklearn.linear_model._least_angle/Lars/__init__/fit_intercept": { + "target": "sklearn/sklearn.linear_model._least_angle/Lars/__init__/fit_intercept" + }, + "sklearn/sklearn.linear_model._least_angle/LassoLarsCV/__init__/normalize": { + "target": "sklearn/sklearn.linear_model._least_angle/LassoLarsCV/__init__/normalize" + }, + "sklearn/sklearn.linear_model._least_angle/LassoLarsIC/__init__/criterion": { + "target": "sklearn/sklearn.linear_model._least_angle/LassoLarsIC/__init__/criterion" + }, + "sklearn/sklearn.linear_model._least_angle/LassoLarsIC/__init__/fit_intercept": { + "target": "sklearn/sklearn.linear_model._least_angle/LassoLarsIC/__init__/fit_intercept" + }, + "sklearn/sklearn.linear_model._logistic/LogisticRegression/fit/sample_weight": { + "target": "sklearn/sklearn.linear_model._logistic/LogisticRegression/fit/sample_weight" + }, + "sklearn/sklearn.linear_model._logistic/LogisticRegressionCV/__init__/cv": { + "target": "sklearn/sklearn.linear_model._logistic/LogisticRegressionCV/__init__/cv" + }, + "sklearn/sklearn.linear_model._passive_aggressive/PassiveAggressiveRegressor/__init__/epsilon": { + "target": "sklearn/sklearn.linear_model._passive_aggressive/PassiveAggressiveRegressor/__init__/epsilon" + }, + "sklearn/sklearn.linear_model._passive_aggressive/PassiveAggressiveRegressor/__init__/warm_start": { + "target": "sklearn/sklearn.linear_model._passive_aggressive/PassiveAggressiveRegressor/__init__/warm_start" + }, + "sklearn/sklearn.linear_model._ransac/RANSACRegressor/__init__/estimator": { + "target": 
"sklearn/sklearn.linear_model._ransac/RANSACRegressor/__init__/estimator" + }, + "sklearn/sklearn.linear_model._ransac/RANSACRegressor/__init__/base_estimator": { + "target": "sklearn/sklearn.linear_model._ransac/RANSACRegressor/__init__/base_estimator" + }, + "sklearn/sklearn.linear_model._ridge/Ridge/fit/sample_weight": { + "target": "sklearn/sklearn.linear_model._ridge/Ridge/fit/sample_weight" + }, + "sklearn/sklearn.linear_model._ridge/RidgeClassifierCV/__init__/alphas": { + "target": "sklearn/sklearn.linear_model._ridge/RidgeClassifierCV/__init__/alphas" + }, + "sklearn/sklearn.linear_model._ridge/_BaseRidgeCV/__init__/alphas": { + "target": "sklearn/sklearn.linear_model._ridge/_BaseRidgeCV/__init__/alphas" + }, + "sklearn/sklearn.linear_model._stochastic_gradient/BaseSGDClassifier/partial_fit/classes": { + "target": "sklearn/sklearn.linear_model._stochastic_gradient/BaseSGDClassifier/partial_fit/classes" + }, + "sklearn/sklearn.linear_model._stochastic_gradient/SGDClassifier/__init__/loss": { + "target": "sklearn/sklearn.linear_model._stochastic_gradient/SGDClassifier/__init__/loss" + }, + "sklearn/sklearn.linear_model._stochastic_gradient/SGDClassifier/__init__/epsilon": { + "target": "sklearn/sklearn.linear_model._stochastic_gradient/SGDClassifier/__init__/epsilon" + }, + "sklearn/sklearn.linear_model._stochastic_gradient/SGDClassifier/__init__/class_weight": { + "target": "sklearn/sklearn.linear_model._stochastic_gradient/SGDClassifier/__init__/class_weight" + }, + "sklearn/sklearn.linear_model._stochastic_gradient/SGDRegressor/__init__/epsilon": { + "target": "sklearn/sklearn.linear_model._stochastic_gradient/SGDRegressor/__init__/epsilon" + }, + "sklearn/sklearn.manifold._locally_linear/LocallyLinearEmbedding/__init__/n_neighbors": { + "target": "sklearn/sklearn.manifold._locally_linear/LocallyLinearEmbedding/__init__/n_neighbors" + }, + "sklearn/sklearn.manifold._locally_linear/LocallyLinearEmbedding/__init__/method": { + "target": "sklearn/sklearn.manifold._locally_linear/LocallyLinearEmbedding/__init__/method" + }, + "sklearn/sklearn.manifold._locally_linear/LocallyLinearEmbedding/__init__/random_state": { + "target": "sklearn/sklearn.manifold._locally_linear/LocallyLinearEmbedding/__init__/random_state" + }, + "sklearn/sklearn.manifold._mds/MDS/__init__/n_components": { + "target": "sklearn/sklearn.manifold._mds/MDS/__init__/n_components" + }, + "sklearn/sklearn.manifold._mds/MDS/__init__/n_init": { + "target": "sklearn/sklearn.manifold._mds/MDS/__init__/n_init" + }, + "sklearn/sklearn.manifold._mds/MDS/__init__/max_iter": { + "target": "sklearn/sklearn.manifold._mds/MDS/__init__/max_iter" + }, + "sklearn/sklearn.manifold._mds/MDS/__init__/dissimilarity": { + "target": "sklearn/sklearn.manifold._mds/MDS/__init__/dissimilarity" + }, + "sklearn/sklearn.manifold._spectral_embedding/SpectralEmbedding/__init__/n_components": { + "target": "sklearn/sklearn.manifold._spectral_embedding/SpectralEmbedding/__init__/n_components" + }, + "sklearn/sklearn.manifold._spectral_embedding/SpectralEmbedding/__init__/n_neighbors": { + "target": "sklearn/sklearn.manifold._spectral_embedding/SpectralEmbedding/__init__/n_neighbors" + }, + "sklearn/sklearn.manifold._t_sne/TSNE/fit_transform/y": { + "target": "sklearn/sklearn.manifold._t_sne/TSNE/fit_transform/y" + }, "sklearn/sklearn.metrics._classification/accuracy_score/sample_weight": { "target": "sklearn/sklearn.metrics._classification/accuracy_score/sample_weight" }, @@ -11612,6 +13516,18 @@ 
"sklearn/sklearn.metrics._classification/recall_score/average": { "target": "sklearn/sklearn.metrics._classification/recall_score/average" }, + "sklearn/sklearn.metrics._plot.confusion_matrix/ConfusionMatrixDisplay/__init__/display_labels": { + "target": "sklearn/sklearn.metrics._plot.confusion_matrix/ConfusionMatrixDisplay/__init__/display_labels" + }, + "sklearn/sklearn.metrics._plot.confusion_matrix/ConfusionMatrixDisplay/plot/xticks_rotation": { + "target": "sklearn/sklearn.metrics._plot.confusion_matrix/ConfusionMatrixDisplay/plot/xticks_rotation" + }, + "sklearn/sklearn.metrics._plot.confusion_matrix/ConfusionMatrixDisplay/plot/values_format": { + "target": "sklearn/sklearn.metrics._plot.confusion_matrix/ConfusionMatrixDisplay/plot/values_format" + }, + "sklearn/sklearn.metrics._plot.confusion_matrix/ConfusionMatrixDisplay/plot/ax": { + "target": "sklearn/sklearn.metrics._plot.confusion_matrix/ConfusionMatrixDisplay/plot/ax" + }, "sklearn/sklearn.metrics._plot.confusion_matrix/plot_confusion_matrix/labels": { "target": "sklearn/sklearn.metrics._plot.confusion_matrix/plot_confusion_matrix/labels" }, @@ -11678,6 +13594,177 @@ "sklearn/sklearn.metrics.pairwise/rbf_kernel/Y": { "target": "sklearn/sklearn.metrics.pairwise/rbf_kernel/Y" }, + "sklearn/sklearn.mixture._base/BaseMixture/fit/y": { + "target": "sklearn/sklearn.mixture._base/BaseMixture/fit/y" + }, + "sklearn/sklearn.mixture._base/BaseMixture/fit_predict/y": { + "target": "sklearn/sklearn.mixture._base/BaseMixture/fit_predict/y" + }, + "sklearn/sklearn.mixture._base/BaseMixture/sample/n_samples": { + "target": "sklearn/sklearn.mixture._base/BaseMixture/sample/n_samples" + }, + "sklearn/sklearn.mixture._bayesian_mixture/BayesianGaussianMixture/__init__/n_components": { + "target": "sklearn/sklearn.mixture._bayesian_mixture/BayesianGaussianMixture/__init__/n_components" + }, + "sklearn/sklearn.mixture._bayesian_mixture/BayesianGaussianMixture/__init__/max_iter": { + "target": "sklearn/sklearn.mixture._bayesian_mixture/BayesianGaussianMixture/__init__/max_iter" + }, + "sklearn/sklearn.mixture._bayesian_mixture/BayesianGaussianMixture/__init__/random_state": { + "target": "sklearn/sklearn.mixture._bayesian_mixture/BayesianGaussianMixture/__init__/random_state" + }, + "sklearn/sklearn.mixture._gaussian_mixture/GaussianMixture/__init__/weights_init": { + "target": "sklearn/sklearn.mixture._gaussian_mixture/GaussianMixture/__init__/weights_init" + }, + "sklearn/sklearn.mixture._gaussian_mixture/GaussianMixture/__init__/means_init": { + "target": "sklearn/sklearn.mixture._gaussian_mixture/GaussianMixture/__init__/means_init" + }, + "sklearn/sklearn.mixture._gaussian_mixture/GaussianMixture/__init__/precisions_init": { + "target": "sklearn/sklearn.mixture._gaussian_mixture/GaussianMixture/__init__/precisions_init" + }, + "sklearn/sklearn.model_selection._search/BaseSearchCV/fit/y": { + "target": "sklearn/sklearn.model_selection._search/BaseSearchCV/fit/y" + }, + "sklearn/sklearn.model_selection._search/BaseSearchCV/fit/groups": { + "target": "sklearn/sklearn.model_selection._search/BaseSearchCV/fit/groups" + }, + "sklearn/sklearn.model_selection._search/BaseSearchCV/score/y": { + "target": "sklearn/sklearn.model_selection._search/BaseSearchCV/score/y" + }, + "sklearn/sklearn.model_selection._search/GridSearchCV/__init__/error_score": { + "target": "sklearn/sklearn.model_selection._search/GridSearchCV/__init__/error_score" + }, + "sklearn/sklearn.model_selection._search/RandomizedSearchCV/__init__/n_jobs": { + "target": 
"sklearn/sklearn.model_selection._search/RandomizedSearchCV/__init__/n_jobs" + }, + "sklearn/sklearn.model_selection._search/RandomizedSearchCV/__init__/error_score": { + "target": "sklearn/sklearn.model_selection._search/RandomizedSearchCV/__init__/error_score" + }, + "sklearn/sklearn.model_selection._search_successive_halving/BaseSuccessiveHalving/fit/y": { + "target": "sklearn/sklearn.model_selection._search_successive_halving/BaseSuccessiveHalving/fit/y" + }, + "sklearn/sklearn.model_selection._search_successive_halving/HalvingRandomSearchCV/__init__/resource": { + "target": "sklearn/sklearn.model_selection._search_successive_halving/HalvingRandomSearchCV/__init__/resource" + }, + "sklearn/sklearn.model_selection._search_successive_halving/HalvingRandomSearchCV/__init__/max_resources": { + "target": "sklearn/sklearn.model_selection._search_successive_halving/HalvingRandomSearchCV/__init__/max_resources" + }, + "sklearn/sklearn.model_selection._search_successive_halving/HalvingRandomSearchCV/__init__/cv": { + "target": "sklearn/sklearn.model_selection._search_successive_halving/HalvingRandomSearchCV/__init__/cv" + }, + "sklearn/sklearn.model_selection._search_successive_halving/HalvingRandomSearchCV/__init__/scoring": { + "target": "sklearn/sklearn.model_selection._search_successive_halving/HalvingRandomSearchCV/__init__/scoring" + }, + "sklearn/sklearn.model_selection._search_successive_halving/HalvingRandomSearchCV/__init__/random_state": { + "target": "sklearn/sklearn.model_selection._search_successive_halving/HalvingRandomSearchCV/__init__/random_state" + }, + "sklearn/sklearn.model_selection._search_successive_halving/HalvingRandomSearchCV/__init__/verbose": { + "target": "sklearn/sklearn.model_selection._search_successive_halving/HalvingRandomSearchCV/__init__/verbose" + }, + "sklearn/sklearn.model_selection._split/BaseShuffleSplit/get_n_splits/X": { + "target": "sklearn/sklearn.model_selection._split/BaseShuffleSplit/get_n_splits/X" + }, + "sklearn/sklearn.model_selection._split/BaseShuffleSplit/get_n_splits/y": { + "target": "sklearn/sklearn.model_selection._split/BaseShuffleSplit/get_n_splits/y" + }, + "sklearn/sklearn.model_selection._split/BaseShuffleSplit/get_n_splits/groups": { + "target": "sklearn/sklearn.model_selection._split/BaseShuffleSplit/get_n_splits/groups" + }, + "sklearn/sklearn.model_selection._split/BaseShuffleSplit/split/y": { + "target": "sklearn/sklearn.model_selection._split/BaseShuffleSplit/split/y" + }, + "sklearn/sklearn.model_selection._split/BaseShuffleSplit/split/groups": { + "target": "sklearn/sklearn.model_selection._split/BaseShuffleSplit/split/groups" + }, + "sklearn/sklearn.model_selection._split/GroupKFold/split/y": { + "target": "sklearn/sklearn.model_selection._split/GroupKFold/split/y" + }, + "sklearn/sklearn.model_selection._split/GroupKFold/split/groups": { + "target": "sklearn/sklearn.model_selection._split/GroupKFold/split/groups" + }, + "sklearn/sklearn.model_selection._split/GroupShuffleSplit/__init__/train_size": { + "target": "sklearn/sklearn.model_selection._split/GroupShuffleSplit/__init__/train_size" + }, + "sklearn/sklearn.model_selection._split/GroupShuffleSplit/__init__/random_state": { + "target": "sklearn/sklearn.model_selection._split/GroupShuffleSplit/__init__/random_state" + }, + "sklearn/sklearn.model_selection._split/GroupShuffleSplit/split/y": { + "target": "sklearn/sklearn.model_selection._split/GroupShuffleSplit/split/y" + }, + "sklearn/sklearn.model_selection._split/GroupShuffleSplit/split/groups": { + "target": 
"sklearn/sklearn.model_selection._split/GroupShuffleSplit/split/groups" + }, + "sklearn/sklearn.model_selection._split/KFold/__init__/shuffle": { + "target": "sklearn/sklearn.model_selection._split/KFold/__init__/shuffle" + }, + "sklearn/sklearn.model_selection._split/LeaveOneGroupOut/get_n_splits/y": { + "target": "sklearn/sklearn.model_selection._split/LeaveOneGroupOut/get_n_splits/y" + }, + "sklearn/sklearn.model_selection._split/LeaveOneGroupOut/split/y": { + "target": "sklearn/sklearn.model_selection._split/LeaveOneGroupOut/split/y" + }, + "sklearn/sklearn.model_selection._split/LeaveOneGroupOut/split/groups": { + "target": "sklearn/sklearn.model_selection._split/LeaveOneGroupOut/split/groups" + }, + "sklearn/sklearn.model_selection._split/RepeatedKFold/__init__/n_splits": { + "target": "sklearn/sklearn.model_selection._split/RepeatedKFold/__init__/n_splits" + }, + "sklearn/sklearn.model_selection._split/RepeatedKFold/__init__/n_repeats": { + "target": "sklearn/sklearn.model_selection._split/RepeatedKFold/__init__/n_repeats" + }, + "sklearn/sklearn.model_selection._split/RepeatedKFold/__init__/random_state": { + "target": "sklearn/sklearn.model_selection._split/RepeatedKFold/__init__/random_state" + }, + "sklearn/sklearn.model_selection._split/RepeatedStratifiedKFold/__init__/n_splits": { + "target": "sklearn/sklearn.model_selection._split/RepeatedStratifiedKFold/__init__/n_splits" + }, + "sklearn/sklearn.model_selection._split/ShuffleSplit/__init__/test_size": { + "target": "sklearn/sklearn.model_selection._split/ShuffleSplit/__init__/test_size" + }, + "sklearn/sklearn.model_selection._split/StratifiedKFold/__init__/shuffle": { + "target": "sklearn/sklearn.model_selection._split/StratifiedKFold/__init__/shuffle" + }, + "sklearn/sklearn.model_selection._split/StratifiedKFold/split/groups": { + "target": "sklearn/sklearn.model_selection._split/StratifiedKFold/split/groups" + }, + "sklearn/sklearn.model_selection._split/StratifiedShuffleSplit/split/groups": { + "target": "sklearn/sklearn.model_selection._split/StratifiedShuffleSplit/split/groups" + }, + "sklearn/sklearn.model_selection._split/TimeSeriesSplit/split/y": { + "target": "sklearn/sklearn.model_selection._split/TimeSeriesSplit/split/y" + }, + "sklearn/sklearn.model_selection._split/TimeSeriesSplit/split/groups": { + "target": "sklearn/sklearn.model_selection._split/TimeSeriesSplit/split/groups" + }, + "sklearn/sklearn.model_selection._split/_BaseKFold/get_n_splits/X": { + "target": "sklearn/sklearn.model_selection._split/_BaseKFold/get_n_splits/X" + }, + "sklearn/sklearn.model_selection._split/_BaseKFold/get_n_splits/y": { + "target": "sklearn/sklearn.model_selection._split/_BaseKFold/get_n_splits/y" + }, + "sklearn/sklearn.model_selection._split/_BaseKFold/get_n_splits/groups": { + "target": "sklearn/sklearn.model_selection._split/_BaseKFold/get_n_splits/groups" + }, + "sklearn/sklearn.model_selection._split/_BaseKFold/split/y": { + "target": "sklearn/sklearn.model_selection._split/_BaseKFold/split/y" + }, + "sklearn/sklearn.model_selection._split/_BaseKFold/split/groups": { + "target": "sklearn/sklearn.model_selection._split/_BaseKFold/split/groups" + }, + "sklearn/sklearn.model_selection._split/_RepeatedSplits/get_n_splits/X": { + "target": "sklearn/sklearn.model_selection._split/_RepeatedSplits/get_n_splits/X" + }, + "sklearn/sklearn.model_selection._split/_RepeatedSplits/get_n_splits/y": { + "target": "sklearn/sklearn.model_selection._split/_RepeatedSplits/get_n_splits/y" + }, + 
"sklearn/sklearn.model_selection._split/_RepeatedSplits/get_n_splits/groups": { + "target": "sklearn/sklearn.model_selection._split/_RepeatedSplits/get_n_splits/groups" + }, + "sklearn/sklearn.model_selection._split/_RepeatedSplits/split/y": { + "target": "sklearn/sklearn.model_selection._split/_RepeatedSplits/split/y" + }, + "sklearn/sklearn.model_selection._split/_RepeatedSplits/split/groups": { + "target": "sklearn/sklearn.model_selection._split/_RepeatedSplits/split/groups" + }, "sklearn/sklearn.model_selection._split/check_cv/y": { "target": "sklearn/sklearn.model_selection._split/check_cv/y" }, @@ -11744,6 +13831,9 @@ "sklearn/sklearn.model_selection._validation/validation_curve/scoring": { "target": "sklearn/sklearn.model_selection._validation/validation_curve/scoring" }, + "sklearn/sklearn.multioutput/_BaseChain/__init__/order": { + "target": "sklearn/sklearn.multioutput/_BaseChain/__init__/order" + }, "sklearn/sklearn.naive_bayes/BernoulliNB/__init__/class_prior": { "target": "sklearn/sklearn.naive_bayes/BernoulliNB/__init__/class_prior" }, @@ -11762,8 +13852,65 @@ "sklearn/sklearn.naive_bayes/GaussianNB/partial_fit/classes": { "target": "sklearn/sklearn.naive_bayes/GaussianNB/partial_fit/classes" }, - "sklearn/sklearn.naive_bayes/MultinomialNB/__init__/class_prior": { - "target": "sklearn/sklearn.naive_bayes/MultinomialNB/__init__/class_prior" + "sklearn/sklearn.naive_bayes/MultinomialNB/__init__/class_prior": { + "target": "sklearn/sklearn.naive_bayes/MultinomialNB/__init__/class_prior" + }, + "sklearn/sklearn.naive_bayes/_BaseDiscreteNB/fit/sample_weight": { + "target": "sklearn/sklearn.naive_bayes/_BaseDiscreteNB/fit/sample_weight" + }, + "sklearn/sklearn.naive_bayes/_BaseDiscreteNB/partial_fit/classes": { + "target": "sklearn/sklearn.naive_bayes/_BaseDiscreteNB/partial_fit/classes" + }, + "sklearn/sklearn.neighbors._base/KNeighborsMixin/kneighbors/X": { + "target": "sklearn/sklearn.neighbors._base/KNeighborsMixin/kneighbors/X" + }, + "sklearn/sklearn.neighbors._base/RadiusNeighborsMixin/radius_neighbors/X": { + "target": "sklearn/sklearn.neighbors._base/RadiusNeighborsMixin/radius_neighbors/X" + }, + "sklearn/sklearn.neighbors._base/RadiusNeighborsMixin/radius_neighbors/radius": { + "target": "sklearn/sklearn.neighbors._base/RadiusNeighborsMixin/radius_neighbors/radius" + }, + "sklearn/sklearn.neighbors._base/RadiusNeighborsMixin/radius_neighbors/return_distance": { + "target": "sklearn/sklearn.neighbors._base/RadiusNeighborsMixin/radius_neighbors/return_distance" + }, + "sklearn/sklearn.neighbors._classification/RadiusNeighborsClassifier/__init__/radius": { + "target": "sklearn/sklearn.neighbors._classification/RadiusNeighborsClassifier/__init__/radius" + }, + "sklearn/sklearn.neighbors._lof/LocalOutlierFactor/__init__/contamination": { + "target": "sklearn/sklearn.neighbors._lof/LocalOutlierFactor/__init__/contamination" + }, + "sklearn/sklearn.neighbors._lof/LocalOutlierFactor/fit_predict/y": { + "target": "sklearn/sklearn.neighbors._lof/LocalOutlierFactor/fit_predict/y" + }, + "sklearn/sklearn.neighbors._regression/KNeighborsRegressor/__init__/metric_params": { + "target": "sklearn/sklearn.neighbors._regression/KNeighborsRegressor/__init__/metric_params" + }, + "sklearn/sklearn.neighbors._regression/RadiusNeighborsRegressor/__init__/radius": { + "target": "sklearn/sklearn.neighbors._regression/RadiusNeighborsRegressor/__init__/radius" + }, + "sklearn/sklearn.neighbors._regression/RadiusNeighborsRegressor/__init__/weights": { + "target": 
"sklearn/sklearn.neighbors._regression/RadiusNeighborsRegressor/__init__/weights" + }, + "sklearn/sklearn.neighbors._regression/RadiusNeighborsRegressor/__init__/p": { + "target": "sklearn/sklearn.neighbors._regression/RadiusNeighborsRegressor/__init__/p" + }, + "sklearn/sklearn.neighbors._regression/RadiusNeighborsRegressor/__init__/metric": { + "target": "sklearn/sklearn.neighbors._regression/RadiusNeighborsRegressor/__init__/metric" + }, + "sklearn/sklearn.neighbors._unsupervised/NearestNeighbors/__init__/radius": { + "target": "sklearn/sklearn.neighbors._unsupervised/NearestNeighbors/__init__/radius" + }, + "sklearn/sklearn.neighbors._unsupervised/NearestNeighbors/fit/y": { + "target": "sklearn/sklearn.neighbors._unsupervised/NearestNeighbors/fit/y" + }, + "sklearn/sklearn.neural_network._multilayer_perceptron/MLPClassifier/__init__/hidden_layer_sizes": { + "target": "sklearn/sklearn.neural_network._multilayer_perceptron/MLPClassifier/__init__/hidden_layer_sizes" + }, + "sklearn/sklearn.neural_network._multilayer_perceptron/MLPRegressor/__init__/hidden_layer_sizes": { + "target": "sklearn/sklearn.neural_network._multilayer_perceptron/MLPRegressor/__init__/hidden_layer_sizes" + }, + "sklearn/sklearn.neural_network._rbm/BernoulliRBM/__init__/random_state": { + "target": "sklearn/sklearn.neural_network._rbm/BernoulliRBM/__init__/random_state" }, "sklearn/sklearn.pipeline/FeatureUnion/__init__/n_jobs": { "target": "sklearn/sklearn.pipeline/FeatureUnion/__init__/n_jobs" @@ -11786,6 +13933,18 @@ "sklearn/sklearn.pipeline/Pipeline/score/y": { "target": "sklearn/sklearn.pipeline/Pipeline/score/y" }, + "sklearn/sklearn.preprocessing._data/MinMaxScaler/__init__/feature_range": { + "target": "sklearn/sklearn.preprocessing._data/MinMaxScaler/__init__/feature_range" + }, + "sklearn/sklearn.preprocessing._data/MinMaxScaler/fit/y": { + "target": "sklearn/sklearn.preprocessing._data/MinMaxScaler/fit/y" + }, + "sklearn/sklearn.preprocessing._data/RobustScaler/__init__/quantile_range": { + "target": "sklearn/sklearn.preprocessing._data/RobustScaler/__init__/quantile_range" + }, + "sklearn/sklearn.preprocessing._data/StandardScaler/fit/y": { + "target": "sklearn/sklearn.preprocessing._data/StandardScaler/fit/y" + }, "sklearn/sklearn.preprocessing._data/binarize/threshold": { "target": "sklearn/sklearn.preprocessing._data/binarize/threshold" }, @@ -11801,12 +13960,81 @@ "sklearn/sklearn.preprocessing._data/quantile_transform/random_state": { "target": "sklearn/sklearn.preprocessing._data/quantile_transform/random_state" }, + "sklearn/sklearn.preprocessing._discretization/KBinsDiscretizer/__init__/strategy": { + "target": "sklearn/sklearn.preprocessing._discretization/KBinsDiscretizer/__init__/strategy" + }, + "sklearn/sklearn.preprocessing._encoders/OneHotEncoder/__init__/categories": { + "target": "sklearn/sklearn.preprocessing._encoders/OneHotEncoder/__init__/categories" + }, + "sklearn/sklearn.preprocessing._encoders/OneHotEncoder/__init__/dtype": { + "target": "sklearn/sklearn.preprocessing._encoders/OneHotEncoder/__init__/dtype" + }, + "sklearn/sklearn.preprocessing._encoders/OneHotEncoder/fit_transform/y": { + "target": "sklearn/sklearn.preprocessing._encoders/OneHotEncoder/fit_transform/y" + }, + "sklearn/sklearn.preprocessing._encoders/OneHotEncoder/get_feature_names/input_features": { + "target": "sklearn/sklearn.preprocessing._encoders/OneHotEncoder/get_feature_names/input_features" + }, + "sklearn/sklearn.preprocessing._encoders/OrdinalEncoder/__init__/categories": { + "target": 
"sklearn/sklearn.preprocessing._encoders/OrdinalEncoder/__init__/categories" + }, + "sklearn/sklearn.preprocessing._encoders/OrdinalEncoder/__init__/dtype": { + "target": "sklearn/sklearn.preprocessing._encoders/OrdinalEncoder/__init__/dtype" + }, + "sklearn/sklearn.preprocessing._function_transformer/FunctionTransformer/__init__/func": { + "target": "sklearn/sklearn.preprocessing._function_transformer/FunctionTransformer/__init__/func" + }, + "sklearn/sklearn.preprocessing._function_transformer/FunctionTransformer/__init__/inverse_func": { + "target": "sklearn/sklearn.preprocessing._function_transformer/FunctionTransformer/__init__/inverse_func" + }, + "sklearn/sklearn.preprocessing._function_transformer/FunctionTransformer/__init__/kw_args": { + "target": "sklearn/sklearn.preprocessing._function_transformer/FunctionTransformer/__init__/kw_args" + }, + "sklearn/sklearn.preprocessing._label/LabelBinarizer/__init__/sparse_output": { + "target": "sklearn/sklearn.preprocessing._label/LabelBinarizer/__init__/sparse_output" + }, + "sklearn/sklearn.preprocessing._label/MultiLabelBinarizer/__init__/classes": { + "target": "sklearn/sklearn.preprocessing._label/MultiLabelBinarizer/__init__/classes" + }, + "sklearn/sklearn.preprocessing._polynomial/PolynomialFeatures/fit/y": { + "target": "sklearn/sklearn.preprocessing._polynomial/PolynomialFeatures/fit/y" + }, + "sklearn/sklearn.preprocessing._polynomial/PolynomialFeatures/get_feature_names/input_features": { + "target": "sklearn/sklearn.preprocessing._polynomial/PolynomialFeatures/get_feature_names/input_features" + }, "sklearn/sklearn.random_projection/GaussianRandomProjection/__init__/n_components": { "target": "sklearn/sklearn.random_projection/GaussianRandomProjection/__init__/n_components" }, "sklearn/sklearn.random_projection/SparseRandomProjection/__init__/n_components": { "target": "sklearn/sklearn.random_projection/SparseRandomProjection/__init__/n_components" }, + "sklearn/sklearn.semi_supervised._label_propagation/LabelSpreading/__init__/n_jobs": { + "target": "sklearn/sklearn.semi_supervised._label_propagation/LabelSpreading/__init__/n_jobs" + }, + "sklearn/sklearn.svm._classes/NuSVC/__init__/kernel": { + "target": "sklearn/sklearn.svm._classes/NuSVC/__init__/kernel" + }, + "sklearn/sklearn.svm._classes/NuSVC/__init__/degree": { + "target": "sklearn/sklearn.svm._classes/NuSVC/__init__/degree" + }, + "sklearn/sklearn.svm._classes/NuSVC/__init__/gamma": { + "target": "sklearn/sklearn.svm._classes/NuSVC/__init__/gamma" + }, + "sklearn/sklearn.svm._classes/OneClassSVM/__init__/gamma": { + "target": "sklearn/sklearn.svm._classes/OneClassSVM/__init__/gamma" + }, + "sklearn/sklearn.svm._classes/OneClassSVM/__init__/nu": { + "target": "sklearn/sklearn.svm._classes/OneClassSVM/__init__/nu" + }, + "sklearn/sklearn.tree._classes/DecisionTreeClassifier/__init__/min_impurity_decrease": { + "target": "sklearn/sklearn.tree._classes/DecisionTreeClassifier/__init__/min_impurity_decrease" + }, + "sklearn/sklearn.tree._classes/DecisionTreeClassifier/fit/sample_weight": { + "target": "sklearn/sklearn.tree._classes/DecisionTreeClassifier/fit/sample_weight" + }, + "sklearn/sklearn.tree._classes/ExtraTreeRegressor/__init__/criterion": { + "target": "sklearn/sklearn.tree._classes/ExtraTreeRegressor/__init__/criterion" + }, "sklearn/sklearn.tree._export/export_graphviz/feature_names": { "target": "sklearn/sklearn.tree._export/export_graphviz/feature_names" }, @@ -11834,6 +14062,9 @@ "sklearn/sklearn.tree._export/plot_tree/ax": { "target": 
"sklearn/sklearn.tree._export/plot_tree/ax" }, + "sklearn/sklearn.utils.metaestimators/_safe_split/train_indices": { + "target": "sklearn/sklearn.utils.metaestimators/_safe_split/train_indices" + }, "sklearn/sklearn.utils.validation/check_array/dtype": { "target": "sklearn/sklearn.utils.validation/check_array/dtype" }, @@ -11861,16 +14092,86 @@ "defaultType": "number", "defaultValue": 10.0 }, + "sklearn/sklearn.cluster._kmeans/MiniBatchKMeans/__init__/n_clusters": { + "target": "sklearn/sklearn.cluster._kmeans/MiniBatchKMeans/__init__/n_clusters", + "defaultType": "number", + "defaultValue": 100.0 + }, + "sklearn/sklearn.cluster._kmeans/MiniBatchKMeans/__init__/batch_size": { + "target": "sklearn/sklearn.cluster._kmeans/MiniBatchKMeans/__init__/batch_size", + "defaultType": "number", + "defaultValue": 10000.0 + }, + "sklearn/sklearn.cluster._mean_shift/MeanShift/__init__/bin_seeding": { + "target": "sklearn/sklearn.cluster._mean_shift/MeanShift/__init__/bin_seeding", + "defaultType": "boolean", + "defaultValue": true + }, + "sklearn/sklearn.cluster._spectral/SpectralClustering/__init__/affinity": { + "target": "sklearn/sklearn.cluster._spectral/SpectralClustering/__init__/affinity", + "defaultType": "string", + "defaultValue": "nearest_neighbors" + }, "sklearn/sklearn.datasets._samples_generator/make_classification/n_redundant": { "target": "sklearn/sklearn.datasets._samples_generator/make_classification/n_redundant", "defaultType": "number", "defaultValue": 0.0 }, + "sklearn/sklearn.decomposition._incremental_pca/IncrementalPCA/__init__/n_components": { + "target": "sklearn/sklearn.decomposition._incremental_pca/IncrementalPCA/__init__/n_components", + "defaultType": "number", + "defaultValue": 5.0 + }, + "sklearn/sklearn.decomposition._kernel_pca/KernelPCA/__init__/n_components": { + "target": "sklearn/sklearn.decomposition._kernel_pca/KernelPCA/__init__/n_components", + "defaultType": "number", + "defaultValue": 2.0 + }, + "sklearn/sklearn.decomposition._kernel_pca/KernelPCA/__init__/kernel": { + "target": "sklearn/sklearn.decomposition._kernel_pca/KernelPCA/__init__/kernel", + "defaultType": "string", + "defaultValue": "rbf" + }, + "sklearn/sklearn.decomposition._lda/LatentDirichletAllocation/__init__/random_state": { + "target": "sklearn/sklearn.decomposition._lda/LatentDirichletAllocation/__init__/random_state", + "defaultType": "number", + "defaultValue": 0.0 + }, + "sklearn/sklearn.decomposition._nmf/NMF/__init__/random_state": { + "target": "sklearn/sklearn.decomposition._nmf/NMF/__init__/random_state", + "defaultType": "number", + "defaultValue": 1337.0 + }, + "sklearn/sklearn.decomposition._sparse_pca/SparsePCA/__init__/n_components": { + "target": "sklearn/sklearn.decomposition._sparse_pca/SparsePCA/__init__/n_components", + "defaultType": "number", + "defaultValue": 2.0 + }, + "sklearn/sklearn.decomposition._sparse_pca/SparsePCA/__init__/random_state": { + "target": "sklearn/sklearn.decomposition._sparse_pca/SparsePCA/__init__/random_state", + "defaultType": "number", + "defaultValue": 42.0 + }, "sklearn/sklearn.feature_selection._mutual_info/mutual_info_classif/random_state": { "target": "sklearn/sklearn.feature_selection._mutual_info/mutual_info_classif/random_state", "defaultType": "number", "defaultValue": 0.0 }, + "sklearn/sklearn.feature_selection._univariate_selection/GenericUnivariateSelect/__init__/mode": { + "target": "sklearn/sklearn.feature_selection._univariate_selection/GenericUnivariateSelect/__init__/mode", + "defaultType": "string", + "defaultValue": "fwe" + 
}, + "sklearn/sklearn.feature_selection._univariate_selection/GenericUnivariateSelect/__init__/param": { + "target": "sklearn/sklearn.feature_selection._univariate_selection/GenericUnivariateSelect/__init__/param", + "defaultType": "number", + "defaultValue": 0.001 + }, + "sklearn/sklearn.feature_selection._variance_threshold/VarianceThreshold/__init__/threshold": { + "target": "sklearn/sklearn.feature_selection._variance_threshold/VarianceThreshold/__init__/threshold", + "defaultType": "number", + "defaultValue": 1.5 + }, "sklearn/sklearn.kernel_ridge/KernelRidge/__init__/alpha": { "target": "sklearn/sklearn.kernel_ridge/KernelRidge/__init__/alpha", "defaultType": "number", @@ -11881,6 +14182,16 @@ "defaultType": "string", "defaultValue": "polynomial" }, + "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNet/__init__/random_state": { + "target": "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNet/__init__/random_state", + "defaultType": "number", + "defaultValue": 42.0 + }, + "sklearn/sklearn.linear_model._stochastic_gradient/SGDClassifier/__init__/random_state": { + "target": "sklearn/sklearn.linear_model._stochastic_gradient/SGDClassifier/__init__/random_state", + "defaultType": "number", + "defaultValue": 42.0 + }, "sklearn/sklearn.metrics._classification/cohen_kappa_score/weights": { "target": "sklearn/sklearn.metrics._classification/cohen_kappa_score/weights", "defaultType": "string", @@ -11901,6 +14212,71 @@ "defaultType": "string", "defaultValue": "f1_macro" }, + "sklearn/sklearn.mixture._gaussian_mixture/GaussianMixture/__init__/n_components": { + "target": "sklearn/sklearn.mixture._gaussian_mixture/GaussianMixture/__init__/n_components", + "defaultType": "number", + "defaultValue": 2.0 + }, + "sklearn/sklearn.model_selection._search/GridSearchCV/__init__/cv": { + "target": "sklearn/sklearn.model_selection._search/GridSearchCV/__init__/cv", + "defaultType": "number", + "defaultValue": 5.0 + }, + "sklearn/sklearn.model_selection._search/RandomizedSearchCV/__init__/cv": { + "target": "sklearn/sklearn.model_selection._search/RandomizedSearchCV/__init__/cv", + "defaultType": "number", + "defaultValue": 5.0 + }, + "sklearn/sklearn.model_selection._split/GroupShuffleSplit/__init__/n_splits": { + "target": "sklearn/sklearn.model_selection._split/GroupShuffleSplit/__init__/n_splits", + "defaultType": "number", + "defaultValue": 1.0 + }, + "sklearn/sklearn.model_selection._split/GroupShuffleSplit/__init__/test_size": { + "target": "sklearn/sklearn.model_selection._split/GroupShuffleSplit/__init__/test_size", + "defaultType": "number", + "defaultValue": 0.15 + }, + "sklearn/sklearn.model_selection._split/RepeatedStratifiedKFold/__init__/n_repeats": { + "target": "sklearn/sklearn.model_selection._split/RepeatedStratifiedKFold/__init__/n_repeats", + "defaultType": "number", + "defaultValue": 3.0 + }, + "sklearn/sklearn.model_selection._split/RepeatedStratifiedKFold/__init__/random_state": { + "target": "sklearn/sklearn.model_selection._split/RepeatedStratifiedKFold/__init__/random_state", + "defaultType": "number", + "defaultValue": 42.0 + }, + "sklearn/sklearn.model_selection._split/ShuffleSplit/__init__/n_splits": { + "target": "sklearn/sklearn.model_selection._split/ShuffleSplit/__init__/n_splits", + "defaultType": "number", + "defaultValue": 1.0 + }, + "sklearn/sklearn.model_selection._split/ShuffleSplit/__init__/random_state": { + "target": "sklearn/sklearn.model_selection._split/ShuffleSplit/__init__/random_state", + "defaultType": "number", + 
"defaultValue": 0.0 + }, + "sklearn/sklearn.model_selection._split/StratifiedKFold/__init__/random_state": { + "target": "sklearn/sklearn.model_selection._split/StratifiedKFold/__init__/random_state", + "defaultType": "number", + "defaultValue": 42.0 + }, + "sklearn/sklearn.model_selection._split/StratifiedShuffleSplit/__init__/n_splits": { + "target": "sklearn/sklearn.model_selection._split/StratifiedShuffleSplit/__init__/n_splits", + "defaultType": "number", + "defaultValue": 1.0 + }, + "sklearn/sklearn.model_selection._split/StratifiedShuffleSplit/__init__/test_size": { + "target": "sklearn/sklearn.model_selection._split/StratifiedShuffleSplit/__init__/test_size", + "defaultType": "number", + "defaultValue": 0.2 + }, + "sklearn/sklearn.model_selection._split/StratifiedShuffleSplit/__init__/random_state": { + "target": "sklearn/sklearn.model_selection._split/StratifiedShuffleSplit/__init__/random_state", + "defaultType": "number", + "defaultValue": 42.0 + }, "sklearn/sklearn.model_selection._split/train_test_split/test_size": { "target": "sklearn/sklearn.model_selection._split/train_test_split/test_size", "defaultType": "number", @@ -11921,6 +14297,41 @@ "defaultType": "string", "defaultValue": "max_depth" }, + "sklearn/sklearn.neighbors._distance_metric/DistanceMetric/get_metric/metric": { + "target": "sklearn/sklearn.neighbors._distance_metric/DistanceMetric/get_metric/metric", + "defaultType": "string", + "defaultValue": "minkowski" + }, + "sklearn/sklearn.neighbors._kde/KernelDensity/__init__/bandwidth": { + "target": "sklearn/sklearn.neighbors._kde/KernelDensity/__init__/bandwidth", + "defaultType": "number", + "defaultValue": 0.02 + }, + "sklearn/sklearn.preprocessing._data/QuantileTransformer/__init__/n_quantiles": { + "target": "sklearn/sklearn.preprocessing._data/QuantileTransformer/__init__/n_quantiles", + "defaultType": "number", + "defaultValue": 100.0 + }, + "sklearn/sklearn.preprocessing._data/QuantileTransformer/__init__/output_distribution": { + "target": "sklearn/sklearn.preprocessing._data/QuantileTransformer/__init__/output_distribution", + "defaultType": "string", + "defaultValue": "normal" + }, + "sklearn/sklearn.preprocessing._data/QuantileTransformer/__init__/random_state": { + "target": "sklearn/sklearn.preprocessing._data/QuantileTransformer/__init__/random_state", + "defaultType": "number", + "defaultValue": 0.0 + }, + "sklearn/sklearn.preprocessing._discretization/KBinsDiscretizer/__init__/n_bins": { + "target": "sklearn/sklearn.preprocessing._discretization/KBinsDiscretizer/__init__/n_bins", + "defaultType": "number", + "defaultValue": 10.0 + }, + "sklearn/sklearn.preprocessing._discretization/KBinsDiscretizer/__init__/encode": { + "target": "sklearn/sklearn.preprocessing._discretization/KBinsDiscretizer/__init__/encode", + "defaultType": "string", + "defaultValue": "ordinal" + }, "sklearn/sklearn.random_projection/GaussianRandomProjection/__init__/random_state": { "target": "sklearn/sklearn.random_projection/GaussianRandomProjection/__init__/random_state", "defaultType": "number", @@ -11936,6 +14347,21 @@ "defaultType": "number", "defaultValue": 420.0 }, + "sklearn/sklearn.semi_supervised._label_propagation/LabelSpreading/__init__/gamma": { + "target": "sklearn/sklearn.semi_supervised._label_propagation/LabelSpreading/__init__/gamma", + "defaultType": "number", + "defaultValue": 0.01 + }, + "sklearn/sklearn.semi_supervised._label_propagation/LabelSpreading/__init__/max_iter": { + "target": 
"sklearn/sklearn.semi_supervised._label_propagation/LabelSpreading/__init__/max_iter", + "defaultType": "number", + "defaultValue": 10.0 + }, + "sklearn/sklearn.svm._classes/NuSVC/__init__/probability": { + "target": "sklearn/sklearn.svm._classes/NuSVC/__init__/probability", + "defaultType": "boolean", + "defaultValue": true + }, "sklearn/sklearn.tree._export/plot_tree/filled": { "target": "sklearn/sklearn.tree._export/plot_tree/filled", "defaultType": "boolean", @@ -11978,26 +14404,6 @@ "upperLimitType": 1 } }, - "sklearn/sklearn.decomposition._pca/PCA/__init__/tol": { - "target": "sklearn/sklearn.decomposition._pca/PCA/__init__/tol", - "interval": { - "isDiscrete": false, - "lowerIntervalLimit": 0.0, - "lowerLimitType": 0, - "upperIntervalLimit": 0, - "upperLimitType": 2 - } - }, - "sklearn/sklearn.decomposition._pca/PCA/__init__/iterated_power": { - "target": "sklearn/sklearn.decomposition._pca/PCA/__init__/iterated_power", - "interval": { - "isDiscrete": false, - "lowerIntervalLimit": 0.0, - "lowerLimitType": 0, - "upperIntervalLimit": 0, - "upperLimitType": 2 - } - }, "sklearn/sklearn.ensemble._forest/ExtraTreesClassifier/__init__/max_samples": { "target": "sklearn/sklearn.ensemble._forest/ExtraTreesClassifier/__init__/max_samples", "interval": { @@ -12088,16 +14494,6 @@ "upperLimitType": 0 } }, - "sklearn/sklearn.ensemble._gb/GradientBoostingClassifier/__init__/validation_fraction": { - "target": "sklearn/sklearn.ensemble._gb/GradientBoostingClassifier/__init__/validation_fraction", - "interval": { - "isDiscrete": false, - "lowerIntervalLimit": 0.0, - "lowerLimitType": 1, - "upperIntervalLimit": 1.0, - "upperLimitType": 1 - } - }, "sklearn/sklearn.ensemble._gb/GradientBoostingRegressor/__init__/subsample": { "target": "sklearn/sklearn.ensemble._gb/GradientBoostingRegressor/__init__/subsample", "interval": { @@ -12416,20 +14812,6 @@ } ] }, - "sklearn/sklearn.cluster._affinity_propagation/AffinityPropagation/__init__/affinity": { - "target": "sklearn/sklearn.cluster._affinity_propagation/AffinityPropagation/__init__/affinity", - "enumName": "Affinity", - "pairs": [ - { - "stringValue": "euclidean", - "instanceName": "Euclidean" - }, - { - "stringValue": "precomputed", - "instanceName": "Precomputed" - } - ] - }, "sklearn/sklearn.cluster._agglomerative/AgglomerativeClustering/__init__/linkage": { "target": "sklearn/sklearn.cluster._agglomerative/AgglomerativeClustering/__init__/linkage", "enumName": "Linkage", @@ -12452,28 +14834,6 @@ } ] }, - "sklearn/sklearn.cluster._agglomerative/FeatureAgglomeration/__init__/linkage": { - "target": "sklearn/sklearn.cluster._agglomerative/FeatureAgglomeration/__init__/linkage", - "enumName": "Linkage", - "pairs": [ - { - "stringValue": "average", - "instanceName": "Average" - }, - { - "stringValue": "complete", - "instanceName": "Complete" - }, - { - "stringValue": "single", - "instanceName": "Single" - }, - { - "stringValue": "ward", - "instanceName": "Ward" - } - ] - }, "sklearn/sklearn.cluster._agglomerative/_fix_connectivity/affinity": { "target": "sklearn/sklearn.cluster._agglomerative/_fix_connectivity/affinity", "enumName": "Affinity", @@ -12666,53 +15026,17 @@ } ] }, - "sklearn/sklearn.cluster._kmeans/MiniBatchKMeans/__init__/init": { - "target": "sklearn/sklearn.cluster._kmeans/MiniBatchKMeans/__init__/init", - "enumName": "Init", - "pairs": [ - { - "stringValue": "k-means++", - "instanceName": "Kmeans" - }, - { - "stringValue": "random", - "instanceName": "Random" - } - ] - }, 
"sklearn/sklearn.cluster._kmeans/_BaseKMeans/_init_centroids/init": { - "target": "sklearn/sklearn.cluster._kmeans/_BaseKMeans/_init_centroids/init", - "enumName": "Init", - "pairs": [ - { - "stringValue": "k-means++", - "instanceName": "Kmeans" - }, - { - "stringValue": "random", - "instanceName": "Random" - } - ] - }, - "sklearn/sklearn.cluster._optics/OPTICS/__init__/algorithm": { - "target": "sklearn/sklearn.cluster._optics/OPTICS/__init__/algorithm", - "enumName": "Algorithm", - "pairs": [ - { - "stringValue": "auto", - "instanceName": "Auto" - }, - { - "stringValue": "ball_tree", - "instanceName": "BallTree" - }, + "target": "sklearn/sklearn.cluster._kmeans/_BaseKMeans/_init_centroids/init", + "enumName": "Init", + "pairs": [ { - "stringValue": "brute", - "instanceName": "Brute" + "stringValue": "k-means++", + "instanceName": "Kmeans" }, { - "stringValue": "kd_tree", - "instanceName": "KdTree" + "stringValue": "random", + "instanceName": "Random" } ] }, @@ -12852,34 +15176,6 @@ } ] }, - "sklearn/sklearn.covariance._graph_lasso/GraphicalLasso/__init__/mode": { - "target": "sklearn/sklearn.covariance._graph_lasso/GraphicalLasso/__init__/mode", - "enumName": "Mode", - "pairs": [ - { - "stringValue": "cd", - "instanceName": "Cd" - }, - { - "stringValue": "lars", - "instanceName": "Lars" - } - ] - }, - "sklearn/sklearn.covariance._graph_lasso/GraphicalLassoCV/__init__/mode": { - "target": "sklearn/sklearn.covariance._graph_lasso/GraphicalLassoCV/__init__/mode", - "enumName": "Mode", - "pairs": [ - { - "stringValue": "cd", - "instanceName": "Cd" - }, - { - "stringValue": "lars", - "instanceName": "Lars" - } - ] - }, "sklearn/sklearn.covariance._graph_lasso/graphical_lasso/mode": { "target": "sklearn/sklearn.covariance._graph_lasso/graphical_lasso/mode", "enumName": "Mode", @@ -12922,56 +15218,6 @@ } ] }, - "sklearn/sklearn.datasets._arff_parser/_pandas_arff_parser/output_type": { - "target": "sklearn/sklearn.datasets._arff_parser/_pandas_arff_parser/output_type", - "enumName": "OutputType", - "pairs": [ - { - "stringValue": "numpy", - "instanceName": "Numpy" - }, - { - "stringValue": "pandas", - "instanceName": "Pandas" - }, - { - "stringValue": "sparse", - "instanceName": "Sparse" - } - ] - }, - "sklearn/sklearn.datasets._arff_parser/load_arff_from_gzip_file/parser": { - "target": "sklearn/sklearn.datasets._arff_parser/load_arff_from_gzip_file/parser", - "enumName": "Parser", - "pairs": [ - { - "stringValue": "liac-arff", - "instanceName": "Liacarff" - }, - { - "stringValue": "pandas", - "instanceName": "Pandas" - } - ] - }, - "sklearn/sklearn.datasets._arff_parser/load_arff_from_gzip_file/output_type": { - "target": "sklearn/sklearn.datasets._arff_parser/load_arff_from_gzip_file/output_type", - "enumName": "OutputType", - "pairs": [ - { - "stringValue": "numpy", - "instanceName": "Numpy" - }, - { - "stringValue": "pandas", - "instanceName": "Pandas" - }, - { - "stringValue": "sparse", - "instanceName": "Sparse" - } - ] - }, "sklearn/sklearn.datasets._kddcup99/fetch_kddcup99/subset": { "target": "sklearn/sklearn.datasets._kddcup99/fetch_kddcup99/subset", "enumName": "Subset", @@ -13012,52 +15258,6 @@ } ] }, - "sklearn/sklearn.datasets._openml/_download_data_to_bunch/parser": { - "target": "sklearn/sklearn.datasets._openml/_download_data_to_bunch/parser", - "enumName": "Parser", - "pairs": [ - { - "stringValue": "liac-arff", - "instanceName": "Liacarff" - }, - { - "stringValue": "pandas", - "instanceName": "Pandas" - } - ] - }, - 
"sklearn/sklearn.datasets._openml/_load_arff_response/parser": { - "target": "sklearn/sklearn.datasets._openml/_load_arff_response/parser", - "enumName": "Parser", - "pairs": [ - { - "stringValue": "liac-arff", - "instanceName": "Liacarff" - }, - { - "stringValue": "pandas", - "instanceName": "Pandas" - } - ] - }, - "sklearn/sklearn.datasets._openml/_load_arff_response/output_type": { - "target": "sklearn/sklearn.datasets._openml/_load_arff_response/output_type", - "enumName": "OutputType", - "pairs": [ - { - "stringValue": "numpy", - "instanceName": "Numpy" - }, - { - "stringValue": "pandas", - "instanceName": "Pandas" - }, - { - "stringValue": "sparse", - "instanceName": "Sparse" - } - ] - }, "sklearn/sklearn.datasets._rcv1/fetch_rcv1/subset": { "target": "sklearn/sklearn.datasets._rcv1/fetch_rcv1/subset", "enumName": "Subset", @@ -13152,46 +15352,6 @@ } ] }, - "sklearn/sklearn.decomposition._dict_learning/MiniBatchDictionaryLearning/__init__/fit_algorithm": { - "target": "sklearn/sklearn.decomposition._dict_learning/MiniBatchDictionaryLearning/__init__/fit_algorithm", - "enumName": "FitAlgorithm", - "pairs": [ - { - "stringValue": "cd", - "instanceName": "Cd" - }, - { - "stringValue": "lars", - "instanceName": "Lars" - } - ] - }, - "sklearn/sklearn.decomposition._dict_learning/MiniBatchDictionaryLearning/__init__/transform_algorithm": { - "target": "sklearn/sklearn.decomposition._dict_learning/MiniBatchDictionaryLearning/__init__/transform_algorithm", - "enumName": "TransformAlgorithm", - "pairs": [ - { - "stringValue": "lars", - "instanceName": "Lars" - }, - { - "stringValue": "lasso_cd", - "instanceName": "LassoCd" - }, - { - "stringValue": "lasso_lars", - "instanceName": "LassoLars" - }, - { - "stringValue": "omp", - "instanceName": "Omp" - }, - { - "stringValue": "threshold", - "instanceName": "Threshold" - } - ] - }, "sklearn/sklearn.decomposition._dict_learning/SparseCoder/__init__/transform_algorithm": { "target": "sklearn/sklearn.decomposition._dict_learning/SparseCoder/__init__/transform_algorithm", "enumName": "TransformAlgorithm", @@ -13298,20 +15458,6 @@ } ] }, - "sklearn/sklearn.decomposition._factor_analysis/FactorAnalysis/__init__/svd_method": { - "target": "sklearn/sklearn.decomposition._factor_analysis/FactorAnalysis/__init__/svd_method", - "enumName": "SvdMethod", - "pairs": [ - { - "stringValue": "lapack", - "instanceName": "Lapack" - }, - { - "stringValue": "randomized", - "instanceName": "Randomized" - } - ] - }, "sklearn/sklearn.decomposition._factor_analysis/FactorAnalysis/__init__/rotation": { "target": "sklearn/sklearn.decomposition._factor_analysis/FactorAnalysis/__init__/rotation", "enumName": "Rotation", @@ -13326,38 +15472,6 @@ } ] }, - "sklearn/sklearn.decomposition._fastica/FastICA/__init__/algorithm": { - "target": "sklearn/sklearn.decomposition._fastica/FastICA/__init__/algorithm", - "enumName": "Algorithm", - "pairs": [ - { - "stringValue": "deflation", - "instanceName": "Deflation" - }, - { - "stringValue": "parallel", - "instanceName": "Parallel" - } - ] - }, - "sklearn/sklearn.decomposition._fastica/FastICA/__init__/fun": { - "target": "sklearn/sklearn.decomposition._fastica/FastICA/__init__/fun", - "enumName": "Fun", - "pairs": [ - { - "stringValue": "cube", - "instanceName": "Cube" - }, - { - "stringValue": "exp", - "instanceName": "Exp" - }, - { - "stringValue": "logcosh", - "instanceName": "Logcosh" - } - ] - }, "sklearn/sklearn.decomposition._fastica/fastica/algorithm": { "target": "sklearn/sklearn.decomposition._fastica/fastica/algorithm", 
"enumName": "Algorithm", @@ -13420,28 +15534,6 @@ } ] }, - "sklearn/sklearn.decomposition._kernel_pca/KernelPCA/__init__/eigen_solver": { - "target": "sklearn/sklearn.decomposition._kernel_pca/KernelPCA/__init__/eigen_solver", - "enumName": "EigenSolver", - "pairs": [ - { - "stringValue": "arpack", - "instanceName": "Arpack" - }, - { - "stringValue": "auto", - "instanceName": "Auto" - }, - { - "stringValue": "dense", - "instanceName": "Dense" - }, - { - "stringValue": "randomized", - "instanceName": "Randomized" - } - ] - }, "sklearn/sklearn.decomposition._lda/LatentDirichletAllocation/__init__/learning_method": { "target": "sklearn/sklearn.decomposition._lda/LatentDirichletAllocation/__init__/learning_method", "enumName": "LearningMethod", @@ -13558,24 +15650,6 @@ } ] }, - "sklearn/sklearn.decomposition._nmf/NMF/__init__/regularization": { - "target": "sklearn/sklearn.decomposition._nmf/NMF/__init__/regularization", - "enumName": "Regularization", - "pairs": [ - { - "stringValue": "both", - "instanceName": "Both" - }, - { - "stringValue": "components", - "instanceName": "Components" - }, - { - "stringValue": "transformation", - "instanceName": "Transformation" - } - ] - }, "sklearn/sklearn.decomposition._nmf/_beta_divergence/beta": { "target": "sklearn/sklearn.decomposition._nmf/_beta_divergence/beta", "enumName": "Beta", @@ -13746,20 +15820,6 @@ } ] }, - "sklearn/sklearn.decomposition._sparse_pca/SparsePCA/__init__/method": { - "target": "sklearn/sklearn.decomposition._sparse_pca/SparsePCA/__init__/method", - "enumName": "Method", - "pairs": [ - { - "stringValue": "cd", - "instanceName": "Cd" - }, - { - "stringValue": "lars", - "instanceName": "Lars" - } - ] - }, "sklearn/sklearn.decomposition._truncated_svd/TruncatedSVD/__init__/algorithm": { "target": "sklearn/sklearn.decomposition._truncated_svd/TruncatedSVD/__init__/algorithm", "enumName": "Algorithm", @@ -14500,20 +16560,6 @@ } ] }, - "sklearn/sklearn.feature_selection._sequential/SequentialFeatureSelector/__init__/direction": { - "target": "sklearn/sklearn.feature_selection._sequential/SequentialFeatureSelector/__init__/direction", - "enumName": "Direction", - "pairs": [ - { - "stringValue": "backward", - "instanceName": "Backward" - }, - { - "stringValue": "forward", - "instanceName": "Forward" - } - ] - }, "sklearn/sklearn.feature_selection._univariate_selection/GenericUnivariateSelect/__init__/mode": { "target": "sklearn/sklearn.feature_selection._univariate_selection/GenericUnivariateSelect/__init__/mode", "enumName": "Mode", @@ -14528,29 +16574,15 @@ }, { "stringValue": "fwe", - "instanceName": "Fwe" - }, - { - "stringValue": "k_best", - "instanceName": "KBest" - }, - { - "stringValue": "percentile", - "instanceName": "Percentile" - } - ] - }, - "sklearn/sklearn.gaussian_process._gpc/GaussianProcessClassifier/__init__/multi_class": { - "target": "sklearn/sklearn.gaussian_process._gpc/GaussianProcessClassifier/__init__/multi_class", - "enumName": "MultiClass", - "pairs": [ + "instanceName": "Fwe" + }, { - "stringValue": "one_vs_one", - "instanceName": "OneVsOne" + "stringValue": "k_best", + "instanceName": "KBest" }, { - "stringValue": "one_vs_rest", - "instanceName": "OneVsRest" + "stringValue": "percentile", + "instanceName": "Percentile" } ] }, @@ -14596,20 +16628,6 @@ } ] }, - "sklearn/sklearn.impute._base/MissingIndicator/__init__/features": { - "target": "sklearn/sklearn.impute._base/MissingIndicator/__init__/features", - "enumName": "Features", - "pairs": [ - { - "stringValue": "all", - "instanceName": "All" - }, - { - 
"stringValue": "missing-only", - "instanceName": "Missingonly" - } - ] - }, "sklearn/sklearn.impute._iterative/IterativeImputer/__init__/initial_strategy": { "target": "sklearn/sklearn.impute._iterative/IterativeImputer/__init__/initial_strategy", "enumName": "InitialStrategy", @@ -14672,16 +16690,6 @@ } ] }, - "sklearn/sklearn.impute._knn/KNNImputer/__init__/metric": { - "target": "sklearn/sklearn.impute._knn/KNNImputer/__init__/metric", - "enumName": "Metric", - "pairs": [ - { - "stringValue": "nan_euclidean", - "instanceName": "NanEuclidean" - } - ] - }, "sklearn/sklearn.inspection._plot.decision_boundary/DecisionBoundaryDisplay/from_estimator/plot_method": { "target": "sklearn/sklearn.inspection._plot.decision_boundary/DecisionBoundaryDisplay/from_estimator/plot_method", "enumName": "PlotMethod", @@ -14848,20 +16856,6 @@ } ] }, - "sklearn/sklearn.linear_model._coordinate_descent/ElasticNetCV/__init__/selection": { - "target": "sklearn/sklearn.linear_model._coordinate_descent/ElasticNetCV/__init__/selection", - "enumName": "Selection", - "pairs": [ - { - "stringValue": "cyclic", - "instanceName": "Cyclic" - }, - { - "stringValue": "random", - "instanceName": "Random" - } - ] - }, "sklearn/sklearn.linear_model._coordinate_descent/Lasso/__init__/selection": { "target": "sklearn/sklearn.linear_model._coordinate_descent/Lasso/__init__/selection", "enumName": "Selection", @@ -14904,20 +16898,6 @@ } ] }, - "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNetCV/__init__/selection": { - "target": "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskElasticNetCV/__init__/selection", - "enumName": "Selection", - "pairs": [ - { - "stringValue": "cyclic", - "instanceName": "Cyclic" - }, - { - "stringValue": "random", - "instanceName": "Random" - } - ] - }, "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskLasso/__init__/selection": { "target": "sklearn/sklearn.linear_model._coordinate_descent/MultiTaskLasso/__init__/selection", "enumName": "Selection", @@ -15310,24 +17290,6 @@ } ] }, - "sklearn/sklearn.linear_model._perceptron/Perceptron/__init__/penalty": { - "target": "sklearn/sklearn.linear_model._perceptron/Perceptron/__init__/penalty", - "enumName": "Penalty", - "pairs": [ - { - "stringValue": "elasticnet", - "instanceName": "Elasticnet" - }, - { - "stringValue": "l1", - "instanceName": "L" - }, - { - "stringValue": "l2", - "instanceName": "L" - } - ] - }, "sklearn/sklearn.linear_model._quantile/QuantileRegressor/__init__/solver": { "target": "sklearn/sklearn.linear_model._quantile/QuantileRegressor/__init__/solver", "enumName": "Solver", @@ -15626,46 +17588,6 @@ } ] }, - "sklearn/sklearn.manifold._isomap/Isomap/__init__/path_method": { - "target": "sklearn/sklearn.manifold._isomap/Isomap/__init__/path_method", - "enumName": "PathMethod", - "pairs": [ - { - "stringValue": "D", - "instanceName": "D" - }, - { - "stringValue": "FW", - "instanceName": "Fw" - }, - { - "stringValue": "auto", - "instanceName": "Auto" - } - ] - }, - "sklearn/sklearn.manifold._isomap/Isomap/__init__/neighbors_algorithm": { - "target": "sklearn/sklearn.manifold._isomap/Isomap/__init__/neighbors_algorithm", - "enumName": "NeighborsAlgorithm", - "pairs": [ - { - "stringValue": "auto", - "instanceName": "Auto" - }, - { - "stringValue": "ball_tree", - "instanceName": "BallTree" - }, - { - "stringValue": "brute", - "instanceName": "Brute" - }, - { - "stringValue": "kd_tree", - "instanceName": "KdTree" - } - ] - }, 
"sklearn/sklearn.manifold._locally_linear/LocallyLinearEmbedding/__init__/eigen_solver": { "target": "sklearn/sklearn.manifold._locally_linear/LocallyLinearEmbedding/__init__/eigen_solver", "enumName": "EigenSolver", @@ -15706,28 +17628,6 @@ } ] }, - "sklearn/sklearn.manifold._locally_linear/LocallyLinearEmbedding/__init__/neighbors_algorithm": { - "target": "sklearn/sklearn.manifold._locally_linear/LocallyLinearEmbedding/__init__/neighbors_algorithm", - "enumName": "NeighborsAlgorithm", - "pairs": [ - { - "stringValue": "auto", - "instanceName": "Auto" - }, - { - "stringValue": "ball_tree", - "instanceName": "BallTree" - }, - { - "stringValue": "brute", - "instanceName": "Brute" - }, - { - "stringValue": "kd_tree", - "instanceName": "KdTree" - } - ] - }, "sklearn/sklearn.manifold._locally_linear/locally_linear_embedding/eigen_solver": { "target": "sklearn/sklearn.manifold._locally_linear/locally_linear_embedding/eigen_solver", "enumName": "EigenSolver", @@ -15822,24 +17722,6 @@ } ] }, - "sklearn/sklearn.manifold._spectral_embedding/SpectralEmbedding/__init__/eigen_solver": { - "target": "sklearn/sklearn.manifold._spectral_embedding/SpectralEmbedding/__init__/eigen_solver", - "enumName": "EigenSolver", - "pairs": [ - { - "stringValue": "amg", - "instanceName": "Amg" - }, - { - "stringValue": "arpack", - "instanceName": "Arpack" - }, - { - "stringValue": "lobpcg", - "instanceName": "Lobpcg" - } - ] - }, "sklearn/sklearn.manifold._spectral_embedding/spectral_embedding/eigen_solver": { "target": "sklearn/sklearn.manifold._spectral_embedding/spectral_embedding/eigen_solver", "enumName": "EigenSolver", @@ -16456,50 +18338,6 @@ } ] }, - "sklearn/sklearn.mixture._bayesian_mixture/BayesianGaussianMixture/__init__/covariance_type": { - "target": "sklearn/sklearn.mixture._bayesian_mixture/BayesianGaussianMixture/__init__/covariance_type", - "enumName": "CovarianceType", - "pairs": [ - { - "stringValue": "diag", - "instanceName": "Diag" - }, - { - "stringValue": "full", - "instanceName": "Full" - }, - { - "stringValue": "spherical", - "instanceName": "Spherical" - }, - { - "stringValue": "tied", - "instanceName": "Tied" - } - ] - }, - "sklearn/sklearn.mixture._bayesian_mixture/BayesianGaussianMixture/__init__/init_params": { - "target": "sklearn/sklearn.mixture._bayesian_mixture/BayesianGaussianMixture/__init__/init_params", - "enumName": "InitParams", - "pairs": [ - { - "stringValue": "k-means++", - "instanceName": "Kmeans" - }, - { - "stringValue": "kmeans", - "instanceName": "Kmeans" - }, - { - "stringValue": "random", - "instanceName": "Random" - }, - { - "stringValue": "random_from_data", - "instanceName": "RandomFromData" - } - ] - }, "sklearn/sklearn.mixture._gaussian_mixture/GaussianMixture/__init__/covariance_type": { "target": "sklearn/sklearn.mixture._gaussian_mixture/GaussianMixture/__init__/covariance_type", "enumName": "CovarianceType", @@ -16646,20 +18484,6 @@ } ] }, - "sklearn/sklearn.model_selection._search_successive_halving/HalvingRandomSearchCV/__init__/min_resources": { - "target": "sklearn/sklearn.model_selection._search_successive_halving/HalvingRandomSearchCV/__init__/min_resources", - "enumName": "MinResources", - "pairs": [ - { - "stringValue": "exhaust", - "instanceName": "Exhaust" - }, - { - "stringValue": "smallest", - "instanceName": "Smallest" - } - ] - }, "sklearn/sklearn.model_selection._validation/cross_val_predict/method": { "target": "sklearn/sklearn.model_selection._validation/cross_val_predict/method", "enumName": "Method", @@ -16696,72 +18520,22 @@ } ] }, - 
"sklearn/sklearn.neighbors._base/RadiusNeighborsMixin/radius_neighbors_graph/mode": { - "target": "sklearn/sklearn.neighbors._base/RadiusNeighborsMixin/radius_neighbors_graph/mode", - "enumName": "Mode", - "pairs": [ - { - "stringValue": "connectivity", - "instanceName": "Connectivity" - }, - { - "stringValue": "distance", - "instanceName": "Distance" - } - ] - }, "sklearn/sklearn.neighbors._base/_get_weights/weights": { "target": "sklearn/sklearn.neighbors._base/_get_weights/weights", "enumName": "Weights", "pairs": [ { - "stringValue": "distance", - "instanceName": "Distance" - }, - { - "stringValue": "uniform", - "instanceName": "Uniform" - } - ] - }, - "sklearn/sklearn.neighbors._classification/KNeighborsClassifier/__init__/weights": { - "target": "sklearn/sklearn.neighbors._classification/KNeighborsClassifier/__init__/weights", - "enumName": "Weights", - "pairs": [ - { - "stringValue": "distance", - "instanceName": "Distance" - }, - { - "stringValue": "uniform", - "instanceName": "Uniform" - } - ] - }, - "sklearn/sklearn.neighbors._classification/KNeighborsClassifier/__init__/algorithm": { - "target": "sklearn/sklearn.neighbors._classification/KNeighborsClassifier/__init__/algorithm", - "enumName": "Algorithm", - "pairs": [ - { - "stringValue": "auto", - "instanceName": "Auto" - }, - { - "stringValue": "ball_tree", - "instanceName": "BallTree" - }, - { - "stringValue": "brute", - "instanceName": "Brute" + "stringValue": "distance", + "instanceName": "Distance" }, { - "stringValue": "kd_tree", - "instanceName": "KdTree" + "stringValue": "uniform", + "instanceName": "Uniform" } ] }, - "sklearn/sklearn.neighbors._classification/RadiusNeighborsClassifier/__init__/weights": { - "target": "sklearn/sklearn.neighbors._classification/RadiusNeighborsClassifier/__init__/weights", + "sklearn/sklearn.neighbors._classification/KNeighborsClassifier/__init__/weights": { + "target": "sklearn/sklearn.neighbors._classification/KNeighborsClassifier/__init__/weights", "enumName": "Weights", "pairs": [ { @@ -16774,8 +18548,8 @@ } ] }, - "sklearn/sklearn.neighbors._classification/RadiusNeighborsClassifier/__init__/algorithm": { - "target": "sklearn/sklearn.neighbors._classification/RadiusNeighborsClassifier/__init__/algorithm", + "sklearn/sklearn.neighbors._classification/KNeighborsClassifier/__init__/algorithm": { + "target": "sklearn/sklearn.neighbors._classification/KNeighborsClassifier/__init__/algorithm", "enumName": "Algorithm", "pairs": [ { @@ -16796,16 +18570,6 @@ } ] }, - "sklearn/sklearn.neighbors._classification/RadiusNeighborsClassifier/__init__/outlier_label": { - "target": "sklearn/sklearn.neighbors._classification/RadiusNeighborsClassifier/__init__/outlier_label", - "enumName": "OutlierLabel", - "pairs": [ - { - "stringValue": "most_frequent", - "instanceName": "MostFrequent" - } - ] - }, "sklearn/sklearn.neighbors._graph/KNeighborsTransformer/__init__/mode": { "target": "sklearn/sklearn.neighbors._graph/KNeighborsTransformer/__init__/mode", "enumName": "Mode", @@ -16892,38 +18656,6 @@ } ] }, - "sklearn/sklearn.neighbors._kde/KernelDensity/__init__/bandwidth": { - "target": "sklearn/sklearn.neighbors._kde/KernelDensity/__init__/bandwidth", - "enumName": "Bandwidth", - "pairs": [ - { - "stringValue": "scott", - "instanceName": "Scott" - }, - { - "stringValue": "silverman", - "instanceName": "Silverman" - } - ] - }, - "sklearn/sklearn.neighbors._kde/KernelDensity/__init__/algorithm": { - "target": "sklearn/sklearn.neighbors._kde/KernelDensity/__init__/algorithm", - "enumName": "Algorithm", - 
"pairs": [ - { - "stringValue": "auto", - "instanceName": "Auto" - }, - { - "stringValue": "ball_tree", - "instanceName": "BallTree" - }, - { - "stringValue": "kd_tree", - "instanceName": "KdTree" - } - ] - }, "sklearn/sklearn.neighbors._kde/KernelDensity/__init__/kernel": { "target": "sklearn/sklearn.neighbors._kde/KernelDensity/__init__/kernel", "enumName": "Kernel", @@ -16954,54 +18686,6 @@ } ] }, - "sklearn/sklearn.neighbors._lof/LocalOutlierFactor/__init__/algorithm": { - "target": "sklearn/sklearn.neighbors._lof/LocalOutlierFactor/__init__/algorithm", - "enumName": "Algorithm", - "pairs": [ - { - "stringValue": "auto", - "instanceName": "Auto" - }, - { - "stringValue": "ball_tree", - "instanceName": "BallTree" - }, - { - "stringValue": "brute", - "instanceName": "Brute" - }, - { - "stringValue": "kd_tree", - "instanceName": "KdTree" - } - ] - }, - "sklearn/sklearn.neighbors._nca/NeighborhoodComponentsAnalysis/__init__/init": { - "target": "sklearn/sklearn.neighbors._nca/NeighborhoodComponentsAnalysis/__init__/init", - "enumName": "Init", - "pairs": [ - { - "stringValue": "auto", - "instanceName": "Auto" - }, - { - "stringValue": "identity", - "instanceName": "Identity" - }, - { - "stringValue": "lda", - "instanceName": "Lda" - }, - { - "stringValue": "pca", - "instanceName": "Pca" - }, - { - "stringValue": "random", - "instanceName": "Random" - } - ] - }, "sklearn/sklearn.neighbors._regression/KNeighborsRegressor/__init__/weights": { "target": "sklearn/sklearn.neighbors._regression/KNeighborsRegressor/__init__/weights", "enumName": "Weights", @@ -17052,28 +18736,6 @@ } ] }, - "sklearn/sklearn.neighbors._regression/RadiusNeighborsRegressor/__init__/algorithm": { - "target": "sklearn/sklearn.neighbors._regression/RadiusNeighborsRegressor/__init__/algorithm", - "enumName": "Algorithm", - "pairs": [ - { - "stringValue": "auto", - "instanceName": "Auto" - }, - { - "stringValue": "ball_tree", - "instanceName": "BallTree" - }, - { - "stringValue": "brute", - "instanceName": "Brute" - }, - { - "stringValue": "kd_tree", - "instanceName": "KdTree" - } - ] - }, "sklearn/sklearn.neighbors._unsupervised/NearestNeighbors/__init__/algorithm": { "target": "sklearn/sklearn.neighbors._unsupervised/NearestNeighbors/__init__/algorithm", "enumName": "Algorithm", @@ -17390,20 +19052,6 @@ } ] }, - "sklearn/sklearn.preprocessing._polynomial/PolynomialFeatures/__init__/order": { - "target": "sklearn/sklearn.preprocessing._polynomial/PolynomialFeatures/__init__/order", - "enumName": "Order", - "pairs": [ - { - "stringValue": "C", - "instanceName": "C" - }, - { - "stringValue": "F", - "instanceName": "F" - } - ] - }, "sklearn/sklearn.preprocessing._polynomial/SplineTransformer/__init__/knots": { "target": "sklearn/sklearn.preprocessing._polynomial/SplineTransformer/__init__/knots", "enumName": "Knots", @@ -17472,20 +19120,6 @@ } ] }, - "sklearn/sklearn.semi_supervised._label_propagation/LabelPropagation/__init__/kernel": { - "target": "sklearn/sklearn.semi_supervised._label_propagation/LabelPropagation/__init__/kernel", - "enumName": "Kernel", - "pairs": [ - { - "stringValue": "knn", - "instanceName": "Knn" - }, - { - "stringValue": "rbf", - "instanceName": "Rbf" - } - ] - }, "sklearn/sklearn.semi_supervised._label_propagation/LabelSpreading/__init__/kernel": { "target": "sklearn/sklearn.semi_supervised._label_propagation/LabelSpreading/__init__/kernel", "enumName": "Kernel", @@ -17674,30 +19308,6 @@ } ] }, - "sklearn/sklearn.svm._classes/NuSVC/__init__/class_weight": { - "target": 
"sklearn/sklearn.svm._classes/NuSVC/__init__/class_weight", - "enumName": "ClassWeight", - "pairs": [ - { - "stringValue": "balanced", - "instanceName": "Balanced" - } - ] - }, - "sklearn/sklearn.svm._classes/NuSVC/__init__/decision_function_shape": { - "target": "sklearn/sklearn.svm._classes/NuSVC/__init__/decision_function_shape", - "enumName": "DecisionFunctionShape", - "pairs": [ - { - "stringValue": "ovo", - "instanceName": "Ovo" - }, - { - "stringValue": "ovr", - "instanceName": "Ovr" - } - ] - }, "sklearn/sklearn.svm._classes/NuSVR/__init__/kernel": { "target": "sklearn/sklearn.svm._classes/NuSVR/__init__/kernel", "enumName": "Kernel", @@ -17738,32 +19348,6 @@ } ] }, - "sklearn/sklearn.svm._classes/OneClassSVM/__init__/kernel": { - "target": "sklearn/sklearn.svm._classes/OneClassSVM/__init__/kernel", - "enumName": "Kernel", - "pairs": [ - { - "stringValue": "linear", - "instanceName": "Linear" - }, - { - "stringValue": "poly", - "instanceName": "Poly" - }, - { - "stringValue": "precomputed", - "instanceName": "Precomputed" - }, - { - "stringValue": "rbf", - "instanceName": "Rbf" - }, - { - "stringValue": "sigmoid", - "instanceName": "Sigmoid" - } - ] - }, "sklearn/sklearn.svm._classes/OneClassSVM/__init__/gamma": { "target": "sklearn/sklearn.svm._classes/OneClassSVM/__init__/gamma", "enumName": "Gamma", @@ -17994,20 +19578,6 @@ } ] }, - "sklearn/sklearn.tree._classes/ExtraTreeClassifier/__init__/splitter": { - "target": "sklearn/sklearn.tree._classes/ExtraTreeClassifier/__init__/splitter", - "enumName": "Splitter", - "pairs": [ - { - "stringValue": "best", - "instanceName": "Best" - }, - { - "stringValue": "random", - "instanceName": "Random" - } - ] - }, "sklearn/sklearn.tree._classes/ExtraTreeClassifier/__init__/max_features": { "target": "sklearn/sklearn.tree._classes/ExtraTreeClassifier/__init__/max_features", "enumName": "MaxFeatures", @@ -18054,24 +19624,6 @@ } ] }, - "sklearn/sklearn.tree._classes/ExtraTreeRegressor/__init__/max_features": { - "target": "sklearn/sklearn.tree._classes/ExtraTreeRegressor/__init__/max_features", - "enumName": "MaxFeatures", - "pairs": [ - { - "stringValue": "auto", - "instanceName": "Auto" - }, - { - "stringValue": "log2", - "instanceName": "Log" - }, - { - "stringValue": "sqrt", - "instanceName": "Sqrt" - } - ] - }, "sklearn/sklearn.tree._export/plot_tree/label": { "target": "sklearn/sklearn.tree._export/plot_tree/label", "enumName": "Label", @@ -18108,28 +19660,6 @@ } ] }, - "sklearn/sklearn.utils._param_validation/Interval/__init__/closed": { - "target": "sklearn/sklearn.utils._param_validation/Interval/__init__/closed", - "enumName": "Closed", - "pairs": [ - { - "stringValue": "both", - "instanceName": "Both" - }, - { - "stringValue": "left", - "instanceName": "Left" - }, - { - "stringValue": "neither", - "instanceName": "Neither" - }, - { - "stringValue": "right", - "instanceName": "Right" - } - ] - }, "sklearn/sklearn.utils._testing/_convert_container/constructor_name": { "target": "sklearn/sklearn.utils._testing/_convert_container/constructor_name", "enumName": "ConstructorName", @@ -18256,20 +19786,6 @@ } ] }, - "sklearn/sklearn.utils.extmath/randomized_svd/svd_lapack_driver": { - "target": "sklearn/sklearn.utils.extmath/randomized_svd/svd_lapack_driver", - "enumName": "SvdLapackDriver", - "pairs": [ - { - "stringValue": "gesdd", - "instanceName": "Gesdd" - }, - { - "stringValue": "gesvd", - "instanceName": "Gesvd" - } - ] - }, "sklearn/sklearn.utils.graph/_fix_connected_components/mode": { "target": 
"sklearn/sklearn.utils.graph/_fix_connected_components/mode", "enumName": "Mode", diff --git a/data/api/sklearn__api.json b/data/api/sklearn__api.json index 7b95cec69..f04778ea1 100644 --- a/data/api/sklearn__api.json +++ b/data/api/sklearn__api.json @@ -1,7 +1,7 @@ { - "distribution": "", + "distribution": "scikit-learn", "package": "sklearn", - "version": "0.7.12", + "version": "1.1.1", "modules": [ { "id": "sklearn/sklearn", @@ -293,8 +293,23 @@ { "id": "sklearn/sklearn._distributor_init", "name": "sklearn._distributor_init", - "imports": [], - "from_imports": [], + "imports": [ + { + "module": "os", + "alias": null + }, + { + "module": "os.path", + "alias": "op" + } + ], + "from_imports": [ + { + "module": "ctypes", + "declaration": "WinDLL", + "alias": null + } + ], "classes": [], "functions": [] }, @@ -614,13 +629,7 @@ "alias": null } ], - "from_imports": [ - { - "module": "collections", - "declaration": "defaultdict", - "alias": null - } - ], + "from_imports": [], "classes": [], "functions": [] }, @@ -714,19 +723,14 @@ "declaration": "check_is_fitted", "alias": null }, - { - "module": "sklearn.utils.validation", - "declaration": "_get_feature_names", - "alias": null - }, { "module": "sklearn.utils._estimator_html_repr", "declaration": "estimator_html_repr", "alias": null }, { - "module": "sklearn.utils._param_validation", - "declaration": "validate_parameter_constraints", + "module": "sklearn.utils.validation", + "declaration": "_get_feature_names", "alias": null } ], @@ -1566,8 +1570,8 @@ "alias": null }, { - "module": "sklearn.utils._param_validation", - "declaration": "StrOptions", + "module": "sklearn.utils.validation", + "declaration": "_is_arraylike_not_scalar", "alias": null } ], @@ -1691,16 +1695,6 @@ "declaration": "abstractmethod", "alias": null }, - { - "module": "numbers", - "declaration": "Integral", - "alias": null - }, - { - "module": "numbers", - "declaration": "Real", - "alias": null - }, { "module": "sklearn.base", "declaration": "BaseEstimator", @@ -1786,21 +1780,6 @@ "declaration": "_is_arraylike_not_scalar", "alias": null }, - { - "module": "sklearn.utils._param_validation", - "declaration": "Interval", - "alias": null - }, - { - "module": "sklearn.utils._param_validation", - "declaration": "StrOptions", - "alias": null - }, - { - "module": "sklearn.utils._param_validation", - "declaration": "validate_params", - "alias": null - }, { "module": "sklearn.utils._openmp_helpers", "declaration": "_openmp_effective_n_threads", @@ -2035,16 +2014,6 @@ "module": "sklearn.metrics", "declaration": "pairwise_distances", "alias": null - }, - { - "module": "scipy.sparse", - "declaration": "issparse", - "alias": null - }, - { - "module": "scipy.sparse", - "declaration": "SparseEfficiencyWarning", - "alias": null } ], "classes": ["sklearn/sklearn.cluster._optics/OPTICS"], @@ -3301,8 +3270,8 @@ "alias": "np" }, { - "module": "scipy", - "alias": "sp" + "module": "scipy.sparse", + "alias": null } ], "from_imports": [ @@ -3316,14 +3285,29 @@ "declaration": "Generator", "alias": null }, + { + "module": "typing", + "declaration": "Any", + "alias": null + }, + { + "module": "typing", + "declaration": "Dict", + "alias": null + }, { "module": "typing", "declaration": "List", "alias": null }, { - "module": "sklearn.externals", - "declaration": "_arff", + "module": "typing", + "declaration": "Optional", + "alias": null + }, + { + "module": "typing", + "declaration": "Tuple", "alias": null }, { @@ -3331,6 +3315,11 @@ "declaration": "ArffSparseDataType", "alias": null }, + { + "module": 
"sklearn.externals._arff", + "declaration": "ArffContainerType", + "alias": null + }, { "module": "sklearn.utils", "declaration": "_chunk_generator", @@ -3345,16 +3334,21 @@ "module": "sklearn.utils", "declaration": "get_chunk_n_rows", "alias": null + }, + { + "module": "sklearn.utils", + "declaration": "is_scalar_nan", + "alias": null } ], "classes": [], "functions": [ "sklearn/sklearn.datasets._arff_parser/_split_sparse_columns", "sklearn/sklearn.datasets._arff_parser/_sparse_data_to_array", - "sklearn/sklearn.datasets._arff_parser/_post_process_frame", - "sklearn/sklearn.datasets._arff_parser/_liac_arff_parser", - "sklearn/sklearn.datasets._arff_parser/_pandas_arff_parser", - "sklearn/sklearn.datasets._arff_parser/load_arff_from_gzip_file" + "sklearn/sklearn.datasets._arff_parser/_feature_to_dtype", + "sklearn/sklearn.datasets._arff_parser/_convert_arff_data", + "sklearn/sklearn.datasets._arff_parser/_convert_arff_data_dataframe", + "sklearn/sklearn.datasets._arff_parser/_liac_arff_parser" ] }, { @@ -3908,10 +3902,6 @@ "module": "gzip", "alias": null }, - { - "module": "hashlib", - "alias": null - }, { "module": "json", "alias": null @@ -3924,6 +3914,10 @@ "module": "shutil", "alias": null }, + { + "module": "hashlib", + "alias": null + }, { "module": "time", "alias": null @@ -3934,6 +3928,16 @@ } ], "from_imports": [ + { + "module": "os.path", + "declaration": "join", + "alias": null + }, + { + "module": "warnings", + "declaration": "warn", + "alias": null + }, { "module": "contextlib", "declaration": "closing", @@ -3944,11 +3948,6 @@ "declaration": "wraps", "alias": null }, - { - "module": "os.path", - "declaration": "join", - "alias": null - }, { "module": "typing", "declaration": "Callable", @@ -3990,28 +3989,28 @@ "alias": null }, { - "module": "urllib.error", - "declaration": "HTTPError", + "module": "urllib.request", + "declaration": "urlopen", "alias": null }, { - "module": "urllib.error", - "declaration": "URLError", + "module": "urllib.request", + "declaration": "Request", "alias": null }, { - "module": "urllib.request", - "declaration": "urlopen", + "module": "urllib.error", + "declaration": "HTTPError", "alias": null }, { - "module": "urllib.request", - "declaration": "Request", + "module": "urllib.error", + "declaration": "URLError", "alias": null }, { - "module": "warnings", - "declaration": "warn", + "module": "sklearn.externals", + "declaration": "_arff", "alias": null }, { @@ -4021,18 +4020,13 @@ }, { "module": "sklearn.datasets._arff_parser", - "declaration": "load_arff_from_gzip_file", + "declaration": "_liac_arff_parser", "alias": null }, { "module": "sklearn.utils", "declaration": "Bunch", "alias": null - }, - { - "module": "sklearn.utils", - "declaration": "check_pandas_support", - "alias": null } ], "classes": ["sklearn/sklearn.datasets._openml/OpenMLError"], @@ -7057,11 +7051,6 @@ "declaration": "ExtraTreeRegressor", "alias": null }, - { - "module": "sklearn.tree._tree", - "declaration": "DTYPE", - "alias": "tree_dtype" - }, { "module": "sklearn.utils", "declaration": "check_random_state", @@ -10611,10 +10600,6 @@ "id": "sklearn/sklearn.linear_model._bayes", "name": "sklearn.linear_model._bayes", "imports": [ - { - "module": "numbers", - "alias": null - }, { "module": "numpy", "alias": "np" @@ -10661,11 +10646,6 @@ "declaration": "fast_logdet", "alias": null }, - { - "module": "sklearn.utils", - "declaration": "check_scalar", - "alias": null - }, { "module": "scipy.linalg", "declaration": "pinvh", @@ -12780,11 +12760,6 @@ "declaration": "balanced_accuracy_score", 
"alias": null }, - { - "module": "sklearn.metrics._classification", - "declaration": "class_likelihood_ratios", - "alias": null - }, { "module": "sklearn.metrics._classification", "declaration": "classification_report", @@ -13275,7 +13250,6 @@ "sklearn/sklearn.metrics._classification/_warn_prf", "sklearn/sklearn.metrics._classification/_check_set_wise_labels", "sklearn/sklearn.metrics._classification/precision_recall_fscore_support", - "sklearn/sklearn.metrics._classification/class_likelihood_ratios", "sklearn/sklearn.metrics._classification/precision_score", "sklearn/sklearn.metrics._classification/recall_score", "sklearn/sklearn.metrics._classification/balanced_accuracy_score", @@ -13839,11 +13813,6 @@ "declaration": "matthews_corrcoef", "alias": null }, - { - "module": "sklearn.metrics", - "declaration": "class_likelihood_ratios", - "alias": null - }, { "module": "sklearn.metrics.cluster", "declaration": "adjusted_rand_score", @@ -13915,8 +13884,6 @@ "sklearn/sklearn.metrics._scorer/check_scoring", "sklearn/sklearn.metrics._scorer/_check_multimetric_scoring", "sklearn/sklearn.metrics._scorer/make_scorer", - "sklearn/sklearn.metrics._scorer/positive_likelihood_ratio", - "sklearn/sklearn.metrics._scorer/negative_likelihood_ratio", "sklearn/sklearn.metrics._scorer/get_scorer_names" ] }, @@ -15859,11 +15826,6 @@ "declaration": "NeighborhoodComponentsAnalysis", "alias": null }, - { - "module": "sklearn.neighbors._base", - "declaration": "sort_graph_by_row_values", - "alias": null - }, { "module": "sklearn.neighbors._base", "declaration": "VALID_METRICS", @@ -16042,7 +16004,6 @@ "sklearn/sklearn.neighbors._base/_get_weights", "sklearn/sklearn.neighbors._base/_is_sorted_by_data", "sklearn/sklearn.neighbors._base/_check_precomputed", - "sklearn/sklearn.neighbors._base/sort_graph_by_row_values", "sklearn/sklearn.neighbors._base/_kneighbors_from_graph", "sklearn/sklearn.neighbors._base/_radius_neighbors_from_graph", "sklearn/sklearn.neighbors._base/_tree_query_parallel_helper", @@ -16195,10 +16156,6 @@ "id": "sklearn/sklearn.neighbors._kde", "name": "sklearn.neighbors._kde", "imports": [ - { - "module": "numbers", - "alias": null - }, { "module": "numpy", "alias": "np" @@ -16220,11 +16177,6 @@ "declaration": "check_random_state", "alias": null }, - { - "module": "sklearn.utils", - "declaration": "check_scalar", - "alias": null - }, { "module": "sklearn.utils.validation", "declaration": "_check_sample_weight", @@ -18214,11 +18166,6 @@ } ], "from_imports": [ - { - "module": "sklearn.utils", - "declaration": "deprecated", - "alias": null - }, { "module": "sklearn.svm._base", "declaration": "_fit_liblinear", @@ -19159,78 +19106,6 @@ ], "functions": [] }, - { - "id": "sklearn/sklearn.utils._param_validation", - "name": "sklearn.utils._param_validation", - "imports": [ - { - "module": "functools", - "alias": null - }, - { - "module": "operator", - "alias": null - }, - { - "module": "numpy", - "alias": "np" - } - ], - "from_imports": [ - { - "module": "abc", - "declaration": "ABC", - "alias": null - }, - { - "module": "abc", - "declaration": "abstractmethod", - "alias": null - }, - { - "module": "inspect", - "declaration": "signature", - "alias": null - }, - { - "module": "numbers", - "declaration": "Integral", - "alias": null - }, - { - "module": "numbers", - "declaration": "Real", - "alias": null - }, - { - "module": "scipy.sparse", - "declaration": "issparse", - "alias": null - }, - { - "module": "sklearn.utils.validation", - "declaration": "_is_arraylike_not_scalar", - "alias": null - } - ], - 
"classes": [ - "sklearn/sklearn.utils._param_validation/_Constraint", - "sklearn/sklearn.utils._param_validation/_InstancesOf", - "sklearn/sklearn.utils._param_validation/_NoneConstraint", - "sklearn/sklearn.utils._param_validation/StrOptions", - "sklearn/sklearn.utils._param_validation/Interval", - "sklearn/sklearn.utils._param_validation/_ArrayLikes", - "sklearn/sklearn.utils._param_validation/_SparseMatrices", - "sklearn/sklearn.utils._param_validation/_Callables", - "sklearn/sklearn.utils._param_validation/_RandomStates" - ], - "functions": [ - "sklearn/sklearn.utils._param_validation/validate_parameter_constraints", - "sklearn/sklearn.utils._param_validation/make_constraint", - "sklearn/sklearn.utils._param_validation/validate_params", - "sklearn/sklearn.utils._param_validation/generate_invalid_param_val" - ] - }, { "id": "sklearn/sklearn.utils._pprint", "name": "sklearn.utils._pprint", @@ -19855,16 +19730,6 @@ "declaration": "check_is_fitted", "alias": null }, - { - "module": "sklearn.utils._param_validation", - "declaration": "make_constraint", - "alias": null - }, - { - "module": "sklearn.utils._param_validation", - "declaration": "generate_invalid_param_val", - "alias": null - }, { "module": "sklearn.utils", "declaration": "shuffle", @@ -20025,8 +19890,7 @@ "sklearn/sklearn.utils.estimator_checks/check_estimator_get_tags_default_keys", "sklearn/sklearn.utils.estimator_checks/check_dataframe_column_names_consistency", "sklearn/sklearn.utils.estimator_checks/check_transformer_get_feature_names_out", - "sklearn/sklearn.utils.estimator_checks/check_transformer_get_feature_names_out_pandas", - "sklearn/sklearn.utils.estimator_checks/check_param_validation" + "sklearn/sklearn.utils.estimator_checks/check_transformer_get_feature_names_out_pandas" ] }, { @@ -20973,7 +20837,6 @@ "sklearn/sklearn.base/BaseEstimator/_check_n_features", "sklearn/sklearn.base/BaseEstimator/_check_feature_names", "sklearn/sklearn.base/BaseEstimator/_validate_data", - "sklearn/sklearn.base/BaseEstimator/_validate_params", "sklearn/sklearn.base/BaseEstimator/_repr_html_@getter", "sklearn/sklearn.base/BaseEstimator/_repr_html_inner", "sklearn/sklearn.base/BaseEstimator/_repr_mimebundle_" @@ -21302,8 +21165,7 @@ "sklearn/sklearn.cluster._birch/Birch/predict", "sklearn/sklearn.cluster._birch/Birch/_predict", "sklearn/sklearn.cluster._birch/Birch/transform", - "sklearn/sklearn.cluster._birch/Birch/_global_clustering", - "sklearn/sklearn.cluster._birch/Birch/_more_tags" + "sklearn/sklearn.cluster._birch/Birch/_global_clustering" ], "is_public": true, "description": "Implements the BIRCH clustering algorithm.\n\nIt is a memory-efficient, online-learning algorithm provided as an\nalternative to :class:`MiniBatchKMeans`. It constructs a tree\ndata structure with the cluster centroids being read off the leaf.\nThese can be either the final cluster centroids or can be provided as input\nto another clustering algorithm such as :class:`AgglomerativeClustering`.\n\nRead more in the :ref:`User Guide `.\n\n.. 
versionadded:: 0.16", @@ -21349,6 +21211,7 @@ "superclasses": ["_BaseKMeans"], "methods": [ "sklearn/sklearn.cluster._bisect_k_means/BisectingKMeans/__init__", + "sklearn/sklearn.cluster._bisect_k_means/BisectingKMeans/_check_params", "sklearn/sklearn.cluster._bisect_k_means/BisectingKMeans/_warn_mkl_vcomp", "sklearn/sklearn.cluster._bisect_k_means/BisectingKMeans/_inertia_per_cluster", "sklearn/sklearn.cluster._bisect_k_means/BisectingKMeans/_bisect", @@ -21415,13 +21278,13 @@ "superclasses": ["_BaseKMeans"], "methods": [ "sklearn/sklearn.cluster._kmeans/KMeans/__init__", - "sklearn/sklearn.cluster._kmeans/KMeans/_check_params_vs_input", + "sklearn/sklearn.cluster._kmeans/KMeans/_check_params", "sklearn/sklearn.cluster._kmeans/KMeans/_warn_mkl_vcomp", "sklearn/sklearn.cluster._kmeans/KMeans/fit" ], "is_public": true, "description": "K-Means clustering.\n\nRead more in the :ref:`User Guide `.", - "docstring": "K-Means clustering.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n\n n_clusters : int, default=8\n The number of clusters to form as well as the number of\n centroids to generate.\n\n init : {'k-means++', 'random'}, callable or array-like of shape (n_clusters, n_features), default='k-means++'\n Method for initialization:\n\n 'k-means++' : selects initial cluster centers for k-mean\n clustering in a smart way to speed up convergence. See section\n Notes in k_init for more details.\n\n 'random': choose `n_clusters` observations (rows) at random from data\n for the initial centroids.\n\n If an array is passed, it should be of shape (n_clusters, n_features)\n and gives the initial centers.\n\n If a callable is passed, it should take arguments X, n_clusters and a\n random state and return an initialization.\n\n n_init : 'auto' or int, default=10\n Number of time the k-means algorithm will be run with different\n centroid seeds. The final results will be the best output of\n n_init consecutive runs in terms of inertia.\n\n When `n_init='auto'`, the number of runs will be 10 if using\n `init='random'`, and 1 if using `init='kmeans++'`.\n\n .. versionadded:: 1.2\n Added 'auto' option for `n_init`.\n\n .. versionchanged:: 1.4\n Default value for `n_init` will change from 10 to `'auto'` in version 1.4.\n\n max_iter : int, default=300\n Maximum number of iterations of the k-means algorithm for a\n single run.\n\n tol : float, default=1e-4\n Relative tolerance with regards to Frobenius norm of the difference\n in the cluster centers of two consecutive iterations to declare\n convergence.\n\n verbose : int, default=0\n Verbosity mode.\n\n random_state : int, RandomState instance or None, default=None\n Determines random number generation for centroid initialization. Use\n an int to make the randomness deterministic.\n See :term:`Glossary `.\n\n copy_x : bool, default=True\n When pre-computing distances it is more numerically accurate to center\n the data first. If copy_x is True (default), then the original data is\n not modified. If False, the original data is modified, and put back\n before the function returns, but small numerical differences may be\n introduced by subtracting and then adding the data mean. Note that if\n the original data is not C-contiguous, a copy will be made even if\n copy_x is False. If the original data is sparse, but not in CSR format,\n a copy will be made even if copy_x is False.\n\n algorithm : {\"lloyd\", \"elkan\", \"auto\", \"full\"}, default=\"lloyd\"\n K-means algorithm to use. 
The classical EM-style algorithm is `\"lloyd\"`.\n The `\"elkan\"` variation can be more efficient on some datasets with\n well-defined clusters, by using the triangle inequality. However it's\n more memory intensive due to the allocation of an extra array of shape\n `(n_samples, n_clusters)`.\n\n `\"auto\"` and `\"full\"` are deprecated and they will be removed in\n Scikit-Learn 1.3. They are both aliases for `\"lloyd\"`.\n\n .. versionchanged:: 0.18\n Added Elkan algorithm\n\n .. versionchanged:: 1.1\n Renamed \"full\" to \"lloyd\", and deprecated \"auto\" and \"full\".\n Changed \"auto\" to use \"lloyd\" instead of \"elkan\".\n\n Attributes\n ----------\n cluster_centers_ : ndarray of shape (n_clusters, n_features)\n Coordinates of cluster centers. If the algorithm stops before fully\n converging (see ``tol`` and ``max_iter``), these will not be\n consistent with ``labels_``.\n\n labels_ : ndarray of shape (n_samples,)\n Labels of each point\n\n inertia_ : float\n Sum of squared distances of samples to their closest cluster center,\n weighted by the sample weights if provided.\n\n n_iter_ : int\n Number of iterations run.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\n Names of features seen during :term:`fit`. Defined only when `X`\n has feature names that are all strings.\n\n .. versionadded:: 1.0\n\n See Also\n --------\n MiniBatchKMeans : Alternative online implementation that does incremental\n updates of the centers positions using mini-batches.\n For large scale learning (say n_samples > 10k) MiniBatchKMeans is\n probably much faster than the default batch implementation.\n\n Notes\n -----\n The k-means problem is solved using either Lloyd's or Elkan's algorithm.\n\n The average complexity is given by O(k n T), where n is the number of\n samples and T is the number of iteration.\n\n The worst case complexity is given by O(n^(k+2/p)) with\n n = n_samples, p = n_features. (D. Arthur and S. Vassilvitskii,\n 'How slow is the k-means method?' SoCG2006)\n\n In practice, the k-means algorithm is very fast (one of the fastest\n clustering algorithms available), but it falls in local minima. That's why\n it can be useful to restart it several times.\n\n If the algorithm stops before fully converging (because of ``tol`` or\n ``max_iter``), ``labels_`` and ``cluster_centers_`` will not be consistent,\n i.e. the ``cluster_centers_`` will not be the means of the points in each\n cluster. Also, the estimator will reassign ``labels_`` after the last\n iteration to make ``labels_`` consistent with ``predict`` on the training\n set.\n\n Examples\n --------\n\n >>> from sklearn.cluster import KMeans\n >>> import numpy as np\n >>> X = np.array([[1, 2], [1, 4], [1, 0],\n ... 
[10, 2], [10, 4], [10, 0]])\n >>> kmeans = KMeans(n_clusters=2, random_state=0, n_init=\"auto\").fit(X)\n >>> kmeans.labels_\n array([1, 1, 1, 0, 0, 0], dtype=int32)\n >>> kmeans.predict([[0, 0], [12, 3]])\n array([1, 0], dtype=int32)\n >>> kmeans.cluster_centers_\n array([[10., 2.],\n [ 1., 2.]])\n " + "docstring": "K-Means clustering.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n\n n_clusters : int, default=8\n The number of clusters to form as well as the number of\n centroids to generate.\n\n init : {'k-means++', 'random'}, callable or array-like of shape (n_clusters, n_features), default='k-means++'\n Method for initialization:\n\n 'k-means++' : selects initial cluster centers for k-mean\n clustering in a smart way to speed up convergence. See section\n Notes in k_init for more details.\n\n 'random': choose `n_clusters` observations (rows) at random from data\n for the initial centroids.\n\n If an array is passed, it should be of shape (n_clusters, n_features)\n and gives the initial centers.\n\n If a callable is passed, it should take arguments X, n_clusters and a\n random state and return an initialization.\n\n n_init : int, default=10\n Number of time the k-means algorithm will be run with different\n centroid seeds. The final results will be the best output of\n n_init consecutive runs in terms of inertia.\n\n max_iter : int, default=300\n Maximum number of iterations of the k-means algorithm for a\n single run.\n\n tol : float, default=1e-4\n Relative tolerance with regards to Frobenius norm of the difference\n in the cluster centers of two consecutive iterations to declare\n convergence.\n\n verbose : int, default=0\n Verbosity mode.\n\n random_state : int, RandomState instance or None, default=None\n Determines random number generation for centroid initialization. Use\n an int to make the randomness deterministic.\n See :term:`Glossary `.\n\n copy_x : bool, default=True\n When pre-computing distances it is more numerically accurate to center\n the data first. If copy_x is True (default), then the original data is\n not modified. If False, the original data is modified, and put back\n before the function returns, but small numerical differences may be\n introduced by subtracting and then adding the data mean. Note that if\n the original data is not C-contiguous, a copy will be made even if\n copy_x is False. If the original data is sparse, but not in CSR format,\n a copy will be made even if copy_x is False.\n\n algorithm : {\"lloyd\", \"elkan\", \"auto\", \"full\"}, default=\"lloyd\"\n K-means algorithm to use. The classical EM-style algorithm is `\"lloyd\"`.\n The `\"elkan\"` variation can be more efficient on some datasets with\n well-defined clusters, by using the triangle inequality. However it's\n more memory intensive due to the allocation of an extra array of shape\n `(n_samples, n_clusters)`.\n\n `\"auto\"` and `\"full\"` are deprecated and they will be removed in\n Scikit-Learn 1.3. They are both aliases for `\"lloyd\"`.\n\n .. versionchanged:: 0.18\n Added Elkan algorithm\n\n .. versionchanged:: 1.1\n Renamed \"full\" to \"lloyd\", and deprecated \"auto\" and \"full\".\n Changed \"auto\" to use \"lloyd\" instead of \"elkan\".\n\n Attributes\n ----------\n cluster_centers_ : ndarray of shape (n_clusters, n_features)\n Coordinates of cluster centers. 
If the algorithm stops before fully\n converging (see ``tol`` and ``max_iter``), these will not be\n consistent with ``labels_``.\n\n labels_ : ndarray of shape (n_samples,)\n Labels of each point\n\n inertia_ : float\n Sum of squared distances of samples to their closest cluster center,\n weighted by the sample weights if provided.\n\n n_iter_ : int\n Number of iterations run.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\n Names of features seen during :term:`fit`. Defined only when `X`\n has feature names that are all strings.\n\n .. versionadded:: 1.0\n\n See Also\n --------\n MiniBatchKMeans : Alternative online implementation that does incremental\n updates of the centers positions using mini-batches.\n For large scale learning (say n_samples > 10k) MiniBatchKMeans is\n probably much faster than the default batch implementation.\n\n Notes\n -----\n The k-means problem is solved using either Lloyd's or Elkan's algorithm.\n\n The average complexity is given by O(k n T), where n is the number of\n samples and T is the number of iteration.\n\n The worst case complexity is given by O(n^(k+2/p)) with\n n = n_samples, p = n_features. (D. Arthur and S. Vassilvitskii,\n 'How slow is the k-means method?' SoCG2006)\n\n In practice, the k-means algorithm is very fast (one of the fastest\n clustering algorithms available), but it falls in local minima. That's why\n it can be useful to restart it several times.\n\n If the algorithm stops before fully converging (because of ``tol`` or\n ``max_iter``), ``labels_`` and ``cluster_centers_`` will not be consistent,\n i.e. the ``cluster_centers_`` will not be the means of the points in each\n cluster. Also, the estimator will reassign ``labels_`` after the last\n iteration to make ``labels_`` consistent with ``predict`` on the training\n set.\n\n Examples\n --------\n\n >>> from sklearn.cluster import KMeans\n >>> import numpy as np\n >>> X = np.array([[1, 2], [1, 4], [1, 0],\n ... [10, 2], [10, 4], [10, 0]])\n >>> kmeans = KMeans(n_clusters=2, random_state=0).fit(X)\n >>> kmeans.labels_\n array([1, 1, 1, 0, 0, 0], dtype=int32)\n >>> kmeans.predict([[0, 0], [12, 3]])\n array([1, 0], dtype=int32)\n >>> kmeans.cluster_centers_\n array([[10., 2.],\n [ 1., 2.]])\n " }, { "id": "sklearn/sklearn.cluster._kmeans/MiniBatchKMeans", @@ -21431,7 +21294,7 @@ "superclasses": ["_BaseKMeans"], "methods": [ "sklearn/sklearn.cluster._kmeans/MiniBatchKMeans/__init__", - "sklearn/sklearn.cluster._kmeans/MiniBatchKMeans/_check_params_vs_input", + "sklearn/sklearn.cluster._kmeans/MiniBatchKMeans/_check_params", "sklearn/sklearn.cluster._kmeans/MiniBatchKMeans/_warn_mkl_vcomp", "sklearn/sklearn.cluster._kmeans/MiniBatchKMeans/_mini_batch_convergence", "sklearn/sklearn.cluster._kmeans/MiniBatchKMeans/_random_reassign", @@ -21440,7 +21303,7 @@ ], "is_public": true, "description": "Mini-Batch K-Means clustering.\n\nRead more in the :ref:`User Guide `.", - "docstring": "\n Mini-Batch K-Means clustering.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n\n n_clusters : int, default=8\n The number of clusters to form as well as the number of\n centroids to generate.\n\n init : {'k-means++', 'random'}, callable or array-like of shape (n_clusters, n_features), default='k-means++'\n Method for initialization:\n\n 'k-means++' : selects initial cluster centers for k-mean\n clustering in a smart way to speed up convergence. 
See section\n Notes in k_init for more details.\n\n 'random': choose `n_clusters` observations (rows) at random from data\n for the initial centroids.\n\n If an array is passed, it should be of shape (n_clusters, n_features)\n and gives the initial centers.\n\n If a callable is passed, it should take arguments X, n_clusters and a\n random state and return an initialization.\n\n max_iter : int, default=100\n Maximum number of iterations over the complete dataset before\n stopping independently of any early stopping criterion heuristics.\n\n batch_size : int, default=1024\n Size of the mini batches.\n For faster computations, you can set the ``batch_size`` greater than\n 256 * number of cores to enable parallelism on all cores.\n\n .. versionchanged:: 1.0\n `batch_size` default changed from 100 to 1024.\n\n verbose : int, default=0\n Verbosity mode.\n\n compute_labels : bool, default=True\n Compute label assignment and inertia for the complete dataset\n once the minibatch optimization has converged in fit.\n\n random_state : int, RandomState instance or None, default=None\n Determines random number generation for centroid initialization and\n random reassignment. Use an int to make the randomness deterministic.\n See :term:`Glossary `.\n\n tol : float, default=0.0\n Control early stopping based on the relative center changes as\n measured by a smoothed, variance-normalized of the mean center\n squared position changes. This early stopping heuristics is\n closer to the one used for the batch variant of the algorithms\n but induces a slight computational and memory overhead over the\n inertia heuristic.\n\n To disable convergence detection based on normalized center\n change, set tol to 0.0 (default).\n\n max_no_improvement : int, default=10\n Control early stopping based on the consecutive number of mini\n batches that does not yield an improvement on the smoothed inertia.\n\n To disable convergence detection based on inertia, set\n max_no_improvement to None.\n\n init_size : int, default=None\n Number of samples to randomly sample for speeding up the\n initialization (sometimes at the expense of accuracy): the\n only algorithm is initialized by running a batch KMeans on a\n random subset of the data. This needs to be larger than n_clusters.\n\n If `None`, the heuristic is `init_size = 3 * batch_size` if\n `3 * batch_size < n_clusters`, else `init_size = 3 * n_clusters`.\n\n n_init : 'auto' or int, default=3\n Number of random initializations that are tried.\n In contrast to KMeans, the algorithm is only run once, using the\n best of the ``n_init`` initializations as measured by inertia.\n\n When `n_init='auto'`, the number of runs will be 3 if using\n `init='random'`, and 1 if using `init='kmeans++'`.\n\n .. versionadded:: 1.2\n Added 'auto' option for `n_init`.\n\n .. versionchanged:: 1.4\n Default value for `n_init` will change from 3 to `'auto'` in version 1.4.\n\n reassignment_ratio : float, default=0.01\n Control the fraction of the maximum number of counts for a center to\n be reassigned. A higher value means that low count centers are more\n easily reassigned, which means that the model will take longer to\n converge, but should converge in a better clustering. 
However, too high\n a value may cause convergence issues, especially with a small batch\n size.\n\n Attributes\n ----------\n\n cluster_centers_ : ndarray of shape (n_clusters, n_features)\n Coordinates of cluster centers.\n\n labels_ : ndarray of shape (n_samples,)\n Labels of each point (if compute_labels is set to True).\n\n inertia_ : float\n The value of the inertia criterion associated with the chosen\n partition if compute_labels is set to True. If compute_labels is set to\n False, it's an approximation of the inertia based on an exponentially\n weighted average of the batch inertiae.\n The inertia is defined as the sum of square distances of samples to\n their cluster center, weighted by the sample weights if provided.\n\n n_iter_ : int\n Number of iterations over the full dataset.\n\n n_steps_ : int\n Number of minibatches processed.\n\n .. versionadded:: 1.0\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\n Names of features seen during :term:`fit`. Defined only when `X`\n has feature names that are all strings.\n\n .. versionadded:: 1.0\n\n See Also\n --------\n KMeans : The classic implementation of the clustering method based on the\n Lloyd's algorithm. It consumes the whole set of input data at each\n iteration.\n\n Notes\n -----\n See https://www.eecs.tufts.edu/~dsculley/papers/fastkmeans.pdf\n\n Examples\n --------\n >>> from sklearn.cluster import MiniBatchKMeans\n >>> import numpy as np\n >>> X = np.array([[1, 2], [1, 4], [1, 0],\n ... [4, 2], [4, 0], [4, 4],\n ... [4, 5], [0, 1], [2, 2],\n ... [3, 2], [5, 5], [1, -1]])\n >>> # manually fit on batches\n >>> kmeans = MiniBatchKMeans(n_clusters=2,\n ... random_state=0,\n ... batch_size=6,\n ... n_init=\"auto\")\n >>> kmeans = kmeans.partial_fit(X[0:6,:])\n >>> kmeans = kmeans.partial_fit(X[6:12,:])\n >>> kmeans.cluster_centers_\n array([[2. , 1. ],\n [3.5, 4.5]])\n >>> kmeans.predict([[0, 0], [4, 4]])\n array([0, 1], dtype=int32)\n >>> # fit on the whole data\n >>> kmeans = MiniBatchKMeans(n_clusters=2,\n ... random_state=0,\n ... batch_size=6,\n ... max_iter=10,\n ... n_init=\"auto\").fit(X)\n >>> kmeans.cluster_centers_\n array([[3.97727273, 2.43181818],\n [1.125 , 1.6 ]])\n >>> kmeans.predict([[0, 0], [4, 4]])\n array([1, 0], dtype=int32)\n " + "docstring": "\n Mini-Batch K-Means clustering.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n\n n_clusters : int, default=8\n The number of clusters to form as well as the number of\n centroids to generate.\n\n init : {'k-means++', 'random'}, callable or array-like of shape (n_clusters, n_features), default='k-means++'\n Method for initialization:\n\n 'k-means++' : selects initial cluster centers for k-mean\n clustering in a smart way to speed up convergence. 
See section\n Notes in k_init for more details.\n\n 'random': choose `n_clusters` observations (rows) at random from data\n for the initial centroids.\n\n If an array is passed, it should be of shape (n_clusters, n_features)\n and gives the initial centers.\n\n If a callable is passed, it should take arguments X, n_clusters and a\n random state and return an initialization.\n\n max_iter : int, default=100\n Maximum number of iterations over the complete dataset before\n stopping independently of any early stopping criterion heuristics.\n\n batch_size : int, default=1024\n Size of the mini batches.\n For faster computations, you can set the ``batch_size`` greater than\n 256 * number of cores to enable parallelism on all cores.\n\n .. versionchanged:: 1.0\n `batch_size` default changed from 100 to 1024.\n\n verbose : int, default=0\n Verbosity mode.\n\n compute_labels : bool, default=True\n Compute label assignment and inertia for the complete dataset\n once the minibatch optimization has converged in fit.\n\n random_state : int, RandomState instance or None, default=None\n Determines random number generation for centroid initialization and\n random reassignment. Use an int to make the randomness deterministic.\n See :term:`Glossary `.\n\n tol : float, default=0.0\n Control early stopping based on the relative center changes as\n measured by a smoothed, variance-normalized of the mean center\n squared position changes. This early stopping heuristics is\n closer to the one used for the batch variant of the algorithms\n but induces a slight computational and memory overhead over the\n inertia heuristic.\n\n To disable convergence detection based on normalized center\n change, set tol to 0.0 (default).\n\n max_no_improvement : int, default=10\n Control early stopping based on the consecutive number of mini\n batches that does not yield an improvement on the smoothed inertia.\n\n To disable convergence detection based on inertia, set\n max_no_improvement to None.\n\n init_size : int, default=None\n Number of samples to randomly sample for speeding up the\n initialization (sometimes at the expense of accuracy): the\n only algorithm is initialized by running a batch KMeans on a\n random subset of the data. This needs to be larger than n_clusters.\n\n If `None`, the heuristic is `init_size = 3 * batch_size` if\n `3 * batch_size < n_clusters`, else `init_size = 3 * n_clusters`.\n\n n_init : int, default=3\n Number of random initializations that are tried.\n In contrast to KMeans, the algorithm is only run once, using the\n best of the ``n_init`` initializations as measured by inertia.\n\n reassignment_ratio : float, default=0.01\n Control the fraction of the maximum number of counts for a center to\n be reassigned. A higher value means that low count centers are more\n easily reassigned, which means that the model will take longer to\n converge, but should converge in a better clustering. However, too high\n a value may cause convergence issues, especially with a small batch\n size.\n\n Attributes\n ----------\n\n cluster_centers_ : ndarray of shape (n_clusters, n_features)\n Coordinates of cluster centers.\n\n labels_ : ndarray of shape (n_samples,)\n Labels of each point (if compute_labels is set to True).\n\n inertia_ : float\n The value of the inertia criterion associated with the chosen\n partition if compute_labels is set to True. 
If compute_labels is set to\n False, it's an approximation of the inertia based on an exponentially\n weighted average of the batch inertiae.\n The inertia is defined as the sum of square distances of samples to\n their cluster center, weighted by the sample weights if provided.\n\n n_iter_ : int\n Number of iterations over the full dataset.\n\n n_steps_ : int\n Number of minibatches processed.\n\n .. versionadded:: 1.0\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\n Names of features seen during :term:`fit`. Defined only when `X`\n has feature names that are all strings.\n\n .. versionadded:: 1.0\n\n See Also\n --------\n KMeans : The classic implementation of the clustering method based on the\n Lloyd's algorithm. It consumes the whole set of input data at each\n iteration.\n\n Notes\n -----\n See https://www.eecs.tufts.edu/~dsculley/papers/fastkmeans.pdf\n\n Examples\n --------\n >>> from sklearn.cluster import MiniBatchKMeans\n >>> import numpy as np\n >>> X = np.array([[1, 2], [1, 4], [1, 0],\n ... [4, 2], [4, 0], [4, 4],\n ... [4, 5], [0, 1], [2, 2],\n ... [3, 2], [5, 5], [1, -1]])\n >>> # manually fit on batches\n >>> kmeans = MiniBatchKMeans(n_clusters=2,\n ... random_state=0,\n ... batch_size=6)\n >>> kmeans = kmeans.partial_fit(X[0:6,:])\n >>> kmeans = kmeans.partial_fit(X[6:12,:])\n >>> kmeans.cluster_centers_\n array([[2. , 1. ],\n [3.5, 4.5]])\n >>> kmeans.predict([[0, 0], [4, 4]])\n array([0, 1], dtype=int32)\n >>> # fit on the whole data\n >>> kmeans = MiniBatchKMeans(n_clusters=2,\n ... random_state=0,\n ... batch_size=6,\n ... max_iter=10).fit(X)\n >>> kmeans.cluster_centers_\n array([[1.19..., 1.22...],\n [4.03..., 2.46...]])\n >>> kmeans.predict([[0, 0], [4, 4]])\n array([0, 1], dtype=int32)\n " }, { "id": "sklearn/sklearn.cluster._kmeans/_BaseKMeans", @@ -21456,7 +21319,7 @@ ], "methods": [ "sklearn/sklearn.cluster._kmeans/_BaseKMeans/__init__", - "sklearn/sklearn.cluster._kmeans/_BaseKMeans/_check_params_vs_input", + "sklearn/sklearn.cluster._kmeans/_BaseKMeans/_check_params", "sklearn/sklearn.cluster._kmeans/_BaseKMeans/_warn_mkl_vcomp", "sklearn/sklearn.cluster._kmeans/_BaseKMeans/_check_mkl_vcomp", "sklearn/sklearn.cluster._kmeans/_BaseKMeans/_validate_center_shape", @@ -21501,7 +21364,7 @@ ], "is_public": true, "description": "Estimate clustering structure from vector array.\n\nOPTICS (Ordering Points To Identify the Clustering Structure), closely\nrelated to DBSCAN, finds core sample of high density and expands clusters\nfrom them [1]_. Unlike DBSCAN, keeps cluster hierarchy for a variable\nneighborhood radius. Better suited for usage on large datasets than the\ncurrent sklearn implementation of DBSCAN.\n\nClusters are then extracted using a DBSCAN-like method\n(cluster_method = 'dbscan') or an automatic\ntechnique proposed in [1]_ (cluster_method = 'xi').\n\nThis implementation deviates from the original OPTICS by first performing\nk-nearest-neighborhood searches on all points to identify core sizes, then\ncomputing only the distances to unprocessed points when constructing the\ncluster order. 
Note that we do not employ a heap to manage the expansion\ncandidates, so the time complexity will be O(n^2).\n\nRead more in the :ref:`User Guide `.", - "docstring": "Estimate clustering structure from vector array.\n\n OPTICS (Ordering Points To Identify the Clustering Structure), closely\n related to DBSCAN, finds core sample of high density and expands clusters\n from them [1]_. Unlike DBSCAN, keeps cluster hierarchy for a variable\n neighborhood radius. Better suited for usage on large datasets than the\n current sklearn implementation of DBSCAN.\n\n Clusters are then extracted using a DBSCAN-like method\n (cluster_method = 'dbscan') or an automatic\n technique proposed in [1]_ (cluster_method = 'xi').\n\n This implementation deviates from the original OPTICS by first performing\n k-nearest-neighborhood searches on all points to identify core sizes, then\n computing only the distances to unprocessed points when constructing the\n cluster order. Note that we do not employ a heap to manage the expansion\n candidates, so the time complexity will be O(n^2).\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n min_samples : int > 1 or float between 0 and 1, default=5\n The number of samples in a neighborhood for a point to be considered as\n a core point. Also, up and down steep regions can't have more than\n ``min_samples`` consecutive non-steep points. Expressed as an absolute\n number or a fraction of the number of samples (rounded to be at least\n 2).\n\n max_eps : float, default=np.inf\n The maximum distance between two samples for one to be considered as\n in the neighborhood of the other. Default value of ``np.inf`` will\n identify clusters across all scales; reducing ``max_eps`` will result\n in shorter run times.\n\n metric : str or callable, default='minkowski'\n Metric to use for distance computation. Any metric from scikit-learn\n or scipy.spatial.distance can be used.\n\n If metric is a callable function, it is called on each\n pair of instances (rows) and the resulting value recorded. The callable\n should take two arrays as input and return one value indicating the\n distance between them. This works for Scipy's metrics, but is less\n efficient than passing the metric name as a string. If metric is\n \"precomputed\", `X` is assumed to be a distance matrix and must be\n square.\n\n Valid values for metric are:\n\n - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',\n 'manhattan']\n\n - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',\n 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',\n 'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',\n 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',\n 'yule']\n\n Sparse matrices are only supported by scikit-learn metrics.\n See the documentation for scipy.spatial.distance for details on these\n metrics.\n\n p : int, default=2\n Parameter for the Minkowski metric from\n :class:`~sklearn.metrics.pairwise_distances`. When p = 1, this is\n equivalent to using manhattan_distance (l1), and euclidean_distance\n (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.\n\n metric_params : dict, default=None\n Additional keyword arguments for the metric function.\n\n cluster_method : str, default='xi'\n The extraction method used to extract clusters using the calculated\n reachability and ordering. 
Possible values are \"xi\" and \"dbscan\".\n\n eps : float, default=None\n The maximum distance between two samples for one to be considered as\n in the neighborhood of the other. By default it assumes the same value\n as ``max_eps``.\n Used only when ``cluster_method='dbscan'``.\n\n xi : float between 0 and 1, default=0.05\n Determines the minimum steepness on the reachability plot that\n constitutes a cluster boundary. For example, an upwards point in the\n reachability plot is defined by the ratio from one point to its\n successor being at most 1-xi.\n Used only when ``cluster_method='xi'``.\n\n predecessor_correction : bool, default=True\n Correct clusters according to the predecessors calculated by OPTICS\n [2]_. This parameter has minimal effect on most datasets.\n Used only when ``cluster_method='xi'``.\n\n min_cluster_size : int > 1 or float between 0 and 1, default=None\n Minimum number of samples in an OPTICS cluster, expressed as an\n absolute number or a fraction of the number of samples (rounded to be\n at least 2). If ``None``, the value of ``min_samples`` is used instead.\n Used only when ``cluster_method='xi'``.\n\n algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'\n Algorithm used to compute the nearest neighbors:\n\n - 'ball_tree' will use :class:`BallTree`.\n - 'kd_tree' will use :class:`KDTree`.\n - 'brute' will use a brute-force search.\n - 'auto' (default) will attempt to decide the most appropriate\n algorithm based on the values passed to :meth:`fit` method.\n\n Note: fitting on sparse input will override the setting of\n this parameter, using brute force.\n\n leaf_size : int, default=30\n Leaf size passed to :class:`BallTree` or :class:`KDTree`. This can\n affect the speed of the construction and query, as well as the memory\n required to store the tree. The optimal value depends on the\n nature of the problem.\n\n memory : str or object with the joblib.Memory interface, default=None\n Used to cache the output of the computation of the tree.\n By default, no caching is done. If a string is given, it is the\n path to the caching directory.\n\n n_jobs : int, default=None\n The number of parallel jobs to run for neighbors search.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See :term:`Glossary `\n for more details.\n\n Attributes\n ----------\n labels_ : ndarray of shape (n_samples,)\n Cluster labels for each point in the dataset given to fit().\n Noisy samples and points which are not included in a leaf cluster\n of ``cluster_hierarchy_`` are labeled as -1.\n\n reachability_ : ndarray of shape (n_samples,)\n Reachability distances per sample, indexed by object order. Use\n ``clust.reachability_[clust.ordering_]`` to access in cluster order.\n\n ordering_ : ndarray of shape (n_samples,)\n The cluster ordered list of sample indices.\n\n core_distances_ : ndarray of shape (n_samples,)\n Distance at which each sample becomes a core point, indexed by object\n order. Points which will never be core have a distance of inf. Use\n ``clust.core_distances_[clust.ordering_]`` to access in cluster order.\n\n predecessor_ : ndarray of shape (n_samples,)\n Point that a sample was reached from, indexed by object order.\n Seed points have a predecessor of -1.\n\n cluster_hierarchy_ : ndarray of shape (n_clusters, 2)\n The list of clusters in the form of ``[start, end]`` in each row, with\n all indices inclusive. 
The clusters are ordered according to\n ``(end, -start)`` (ascending) so that larger clusters encompassing\n smaller clusters come after those smaller ones. Since ``labels_`` does\n not reflect the hierarchy, usually\n ``len(cluster_hierarchy_) > np.unique(optics.labels_)``. Please also\n note that these indices are of the ``ordering_``, i.e.\n ``X[ordering_][start:end + 1]`` form a cluster.\n Only available when ``cluster_method='xi'``.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\n Names of features seen during :term:`fit`. Defined only when `X`\n has feature names that are all strings.\n\n .. versionadded:: 1.0\n\n See Also\n --------\n DBSCAN : A similar clustering for a specified neighborhood radius (eps).\n Our implementation is optimized for runtime.\n\n References\n ----------\n .. [1] Ankerst, Mihael, Markus M. Breunig, Hans-Peter Kriegel,\n and J\u00f6rg Sander. \"OPTICS: ordering points to identify the clustering\n structure.\" ACM SIGMOD Record 28, no. 2 (1999): 49-60.\n\n .. [2] Schubert, Erich, Michael Gertz.\n \"Improving the Cluster Structure Extracted from OPTICS Plots.\" Proc. of\n the Conference \"Lernen, Wissen, Daten, Analysen\" (LWDA) (2018): 318-329.\n\n Examples\n --------\n >>> from sklearn.cluster import OPTICS\n >>> import numpy as np\n >>> X = np.array([[1, 2], [2, 5], [3, 6],\n ... [8, 7], [8, 8], [7, 3]])\n >>> clustering = OPTICS(min_samples=2).fit(X)\n >>> clustering.labels_\n array([0, 0, 0, 1, 1, 1])\n " + "docstring": "Estimate clustering structure from vector array.\n\n OPTICS (Ordering Points To Identify the Clustering Structure), closely\n related to DBSCAN, finds core sample of high density and expands clusters\n from them [1]_. Unlike DBSCAN, keeps cluster hierarchy for a variable\n neighborhood radius. Better suited for usage on large datasets than the\n current sklearn implementation of DBSCAN.\n\n Clusters are then extracted using a DBSCAN-like method\n (cluster_method = 'dbscan') or an automatic\n technique proposed in [1]_ (cluster_method = 'xi').\n\n This implementation deviates from the original OPTICS by first performing\n k-nearest-neighborhood searches on all points to identify core sizes, then\n computing only the distances to unprocessed points when constructing the\n cluster order. Note that we do not employ a heap to manage the expansion\n candidates, so the time complexity will be O(n^2).\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n min_samples : int > 1 or float between 0 and 1, default=5\n The number of samples in a neighborhood for a point to be considered as\n a core point. Also, up and down steep regions can't have more than\n ``min_samples`` consecutive non-steep points. Expressed as an absolute\n number or a fraction of the number of samples (rounded to be at least\n 2).\n\n max_eps : float, default=np.inf\n The maximum distance between two samples for one to be considered as\n in the neighborhood of the other. Default value of ``np.inf`` will\n identify clusters across all scales; reducing ``max_eps`` will result\n in shorter run times.\n\n metric : str or callable, default='minkowski'\n Metric to use for distance computation. Any metric from scikit-learn\n or scipy.spatial.distance can be used.\n\n If metric is a callable function, it is called on each\n pair of instances (rows) and the resulting value recorded. 
The callable\n should take two arrays as input and return one value indicating the\n distance between them. This works for Scipy's metrics, but is less\n efficient than passing the metric name as a string. If metric is\n \"precomputed\", `X` is assumed to be a distance matrix and must be\n square.\n\n Valid values for metric are:\n\n - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',\n 'manhattan']\n\n - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',\n 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',\n 'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',\n 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',\n 'yule']\n\n See the documentation for scipy.spatial.distance for details on these\n metrics.\n\n p : int, default=2\n Parameter for the Minkowski metric from\n :class:`~sklearn.metrics.pairwise_distances`. When p = 1, this is\n equivalent to using manhattan_distance (l1), and euclidean_distance\n (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.\n\n metric_params : dict, default=None\n Additional keyword arguments for the metric function.\n\n cluster_method : str, default='xi'\n The extraction method used to extract clusters using the calculated\n reachability and ordering. Possible values are \"xi\" and \"dbscan\".\n\n eps : float, default=None\n The maximum distance between two samples for one to be considered as\n in the neighborhood of the other. By default it assumes the same value\n as ``max_eps``.\n Used only when ``cluster_method='dbscan'``.\n\n xi : float between 0 and 1, default=0.05\n Determines the minimum steepness on the reachability plot that\n constitutes a cluster boundary. For example, an upwards point in the\n reachability plot is defined by the ratio from one point to its\n successor being at most 1-xi.\n Used only when ``cluster_method='xi'``.\n\n predecessor_correction : bool, default=True\n Correct clusters according to the predecessors calculated by OPTICS\n [2]_. This parameter has minimal effect on most datasets.\n Used only when ``cluster_method='xi'``.\n\n min_cluster_size : int > 1 or float between 0 and 1, default=None\n Minimum number of samples in an OPTICS cluster, expressed as an\n absolute number or a fraction of the number of samples (rounded to be\n at least 2). If ``None``, the value of ``min_samples`` is used instead.\n Used only when ``cluster_method='xi'``.\n\n algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'\n Algorithm used to compute the nearest neighbors:\n\n - 'ball_tree' will use :class:`BallTree`.\n - 'kd_tree' will use :class:`KDTree`.\n - 'brute' will use a brute-force search.\n - 'auto' (default) will attempt to decide the most appropriate\n algorithm based on the values passed to :meth:`fit` method.\n\n Note: fitting on sparse input will override the setting of\n this parameter, using brute force.\n\n leaf_size : int, default=30\n Leaf size passed to :class:`BallTree` or :class:`KDTree`. This can\n affect the speed of the construction and query, as well as the memory\n required to store the tree. The optimal value depends on the\n nature of the problem.\n\n memory : str or object with the joblib.Memory interface, default=None\n Used to cache the output of the computation of the tree.\n By default, no caching is done. 
If a string is given, it is the\n path to the caching directory.\n\n n_jobs : int, default=None\n The number of parallel jobs to run for neighbors search.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See :term:`Glossary `\n for more details.\n\n Attributes\n ----------\n labels_ : ndarray of shape (n_samples,)\n Cluster labels for each point in the dataset given to fit().\n Noisy samples and points which are not included in a leaf cluster\n of ``cluster_hierarchy_`` are labeled as -1.\n\n reachability_ : ndarray of shape (n_samples,)\n Reachability distances per sample, indexed by object order. Use\n ``clust.reachability_[clust.ordering_]`` to access in cluster order.\n\n ordering_ : ndarray of shape (n_samples,)\n The cluster ordered list of sample indices.\n\n core_distances_ : ndarray of shape (n_samples,)\n Distance at which each sample becomes a core point, indexed by object\n order. Points which will never be core have a distance of inf. Use\n ``clust.core_distances_[clust.ordering_]`` to access in cluster order.\n\n predecessor_ : ndarray of shape (n_samples,)\n Point that a sample was reached from, indexed by object order.\n Seed points have a predecessor of -1.\n\n cluster_hierarchy_ : ndarray of shape (n_clusters, 2)\n The list of clusters in the form of ``[start, end]`` in each row, with\n all indices inclusive. The clusters are ordered according to\n ``(end, -start)`` (ascending) so that larger clusters encompassing\n smaller clusters come after those smaller ones. Since ``labels_`` does\n not reflect the hierarchy, usually\n ``len(cluster_hierarchy_) > np.unique(optics.labels_)``. Please also\n note that these indices are of the ``ordering_``, i.e.\n ``X[ordering_][start:end + 1]`` form a cluster.\n Only available when ``cluster_method='xi'``.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\n Names of features seen during :term:`fit`. Defined only when `X`\n has feature names that are all strings.\n\n .. versionadded:: 1.0\n\n See Also\n --------\n DBSCAN : A similar clustering for a specified neighborhood radius (eps).\n Our implementation is optimized for runtime.\n\n References\n ----------\n .. [1] Ankerst, Mihael, Markus M. Breunig, Hans-Peter Kriegel,\n and J\u00f6rg Sander. \"OPTICS: ordering points to identify the clustering\n structure.\" ACM SIGMOD Record 28, no. 2 (1999): 49-60.\n\n .. [2] Schubert, Erich, Michael Gertz.\n \"Improving the Cluster Structure Extracted from OPTICS Plots.\" Proc. of\n the Conference \"Lernen, Wissen, Daten, Analysen\" (LWDA) (2018): 318-329.\n\n Examples\n --------\n >>> from sklearn.cluster import OPTICS\n >>> import numpy as np\n >>> X = np.array([[1, 2], [2, 5], [3, 6],\n ... [8, 7], [8, 8], [7, 3]])\n >>> clustering = OPTICS(min_samples=2).fit(X)\n >>> clustering.labels_\n array([0, 0, 0, 1, 1, 1])\n " }, { "id": "sklearn/sklearn.cluster._spectral/SpectralClustering", @@ -21942,7 +21805,7 @@ ], "is_public": true, "description": "FastICA: a fast algorithm for Independent Component Analysis.\n\nThe implementation is based on [1]_.\n\nRead more in the :ref:`User Guide `.", - "docstring": "FastICA: a fast algorithm for Independent Component Analysis.\n\n The implementation is based on [1]_.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n n_components : int, default=None\n Number of components to use. 
If None is passed, all are used.\n\n algorithm : {'parallel', 'deflation'}, default='parallel'\n Specify which algorithm to use for FastICA.\n\n whiten : str or bool, default=\"warn\"\n Specify the whitening strategy to use.\n\n - If 'arbitrary-variance' (default), a whitening with variance\n arbitrary is used.\n - If 'unit-variance', the whitening matrix is rescaled to ensure that\n each recovered source has unit variance.\n - If False, the data is already considered to be whitened, and no\n whitening is performed.\n\n .. deprecated:: 1.1\n Starting in v1.3, `whiten='unit-variance'` will be used by default.\n `whiten=True` is deprecated from 1.1 and will raise ValueError in 1.3.\n Use `whiten=arbitrary-variance` instead.\n\n fun : {'logcosh', 'exp', 'cube'} or callable, default='logcosh'\n The functional form of the G function used in the\n approximation to neg-entropy. Could be either 'logcosh', 'exp',\n or 'cube'.\n You can also provide your own function. It should return a tuple\n containing the value of the function, and of its derivative, in the\n point. The derivative should be averaged along its last dimension.\n Example::\n\n def my_g(x):\n return x ** 3, (3 * x ** 2).mean(axis=-1)\n\n fun_args : dict, default=None\n Arguments to send to the functional form.\n If empty or None and if fun='logcosh', fun_args will take value\n {'alpha' : 1.0}.\n\n max_iter : int, default=200\n Maximum number of iterations during fit.\n\n tol : float, default=1e-4\n A positive scalar giving the tolerance at which the\n un-mixing matrix is considered to have converged.\n\n w_init : ndarray of shape (n_components, n_components), default=None\n Initial un-mixing array. If `w_init=None`, then an array of values\n drawn from a normal distribution is used.\n\n random_state : int, RandomState instance or None, default=None\n Used to initialize ``w_init`` when not specified, with a\n normal distribution. Pass an int, for reproducible results\n across multiple function calls.\n See :term:`Glossary `.\n\n Attributes\n ----------\n components_ : ndarray of shape (n_components, n_features)\n The linear operator to apply to the data to get the independent\n sources. This is equal to the unmixing matrix when ``whiten`` is\n False, and equal to ``np.dot(unmixing_matrix, self.whitening_)`` when\n ``whiten`` is True.\n\n mixing_ : ndarray of shape (n_features, n_components)\n The pseudo-inverse of ``components_``. It is the linear operator\n that maps independent sources to the data.\n\n mean_ : ndarray of shape(n_features,)\n The mean over features. Only set if `self.whiten` is True.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\n Names of features seen during :term:`fit`. Defined only when `X`\n has feature names that are all strings.\n\n .. versionadded:: 1.0\n\n n_iter_ : int\n If the algorithm is \"deflation\", n_iter is the\n maximum number of iterations run across all components. Else\n they are just the number of iterations taken to converge.\n\n whitening_ : ndarray of shape (n_components, n_features)\n Only set if whiten is 'True'. 
This is the pre-whitening matrix\n that projects data onto the first `n_components` principal components.\n\n See Also\n --------\n PCA : Principal component analysis (PCA).\n IncrementalPCA : Incremental principal components analysis (IPCA).\n KernelPCA : Kernel Principal component analysis (KPCA).\n MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis.\n SparsePCA : Sparse Principal Components Analysis (SparsePCA).\n\n References\n ----------\n .. [1] A. Hyvarinen and E. Oja, Independent Component Analysis:\n Algorithms and Applications, Neural Networks, 13(4-5), 2000,\n pp. 411-430.\n\n Examples\n --------\n >>> from sklearn.datasets import load_digits\n >>> from sklearn.decomposition import FastICA\n >>> X, _ = load_digits(return_X_y=True)\n >>> transformer = FastICA(n_components=7,\n ... random_state=0,\n ... whiten='unit-variance')\n >>> X_transformed = transformer.fit_transform(X)\n >>> X_transformed.shape\n (1797, 7)\n " + "docstring": "FastICA: a fast algorithm for Independent Component Analysis.\n\n The implementation is based on [1]_.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n n_components : int, default=None\n Number of components to use. If None is passed, all are used.\n\n algorithm : {'parallel', 'deflation'}, default='parallel'\n Apply parallel or deflational algorithm for FastICA.\n\n whiten : str or bool, default=\"warn\"\n Specify the whitening strategy to use.\n If 'arbitrary-variance' (default), a whitening with variance arbitrary is used.\n If 'unit-variance', the whitening matrix is rescaled to ensure that each\n recovered source has unit variance.\n If False, the data is already considered to be whitened, and no\n whitening is performed.\n\n .. deprecated:: 1.1\n From version 1.3 whiten='unit-variance' will be used by default.\n `whiten=True` is deprecated from 1.1 and will raise ValueError in 1.3.\n Use `whiten=arbitrary-variance` instead.\n\n fun : {'logcosh', 'exp', 'cube'} or callable, default='logcosh'\n The functional form of the G function used in the\n approximation to neg-entropy. Could be either 'logcosh', 'exp',\n or 'cube'.\n You can also provide your own function. It should return a tuple\n containing the value of the function, and of its derivative, in the\n point. Example::\n\n def my_g(x):\n return x ** 3, (3 * x ** 2).mean(axis=-1)\n\n fun_args : dict, default=None\n Arguments to send to the functional form.\n If empty and if fun='logcosh', fun_args will take value\n {'alpha' : 1.0}.\n\n max_iter : int, default=200\n Maximum number of iterations during fit.\n\n tol : float, default=1e-4\n Tolerance on update at each iteration.\n\n w_init : ndarray of shape (n_components, n_components), default=None\n The mixing matrix to be used to initialize the algorithm.\n\n random_state : int, RandomState instance or None, default=None\n Used to initialize ``w_init`` when not specified, with a\n normal distribution. Pass an int, for reproducible results\n across multiple function calls.\n See :term:`Glossary `.\n\n Attributes\n ----------\n components_ : ndarray of shape (n_components, n_features)\n The linear operator to apply to the data to get the independent\n sources. This is equal to the unmixing matrix when ``whiten`` is\n False, and equal to ``np.dot(unmixing_matrix, self.whitening_)`` when\n ``whiten`` is True.\n\n mixing_ : ndarray of shape (n_features, n_components)\n The pseudo-inverse of ``components_``. 
It is the linear operator\n that maps independent sources to the data.\n\n mean_ : ndarray of shape(n_features,)\n The mean over features. Only set if `self.whiten` is True.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\n Names of features seen during :term:`fit`. Defined only when `X`\n has feature names that are all strings.\n\n .. versionadded:: 1.0\n\n n_iter_ : int\n If the algorithm is \"deflation\", n_iter is the\n maximum number of iterations run across all components. Else\n they are just the number of iterations taken to converge.\n\n whitening_ : ndarray of shape (n_components, n_features)\n Only set if whiten is 'True'. This is the pre-whitening matrix\n that projects data onto the first `n_components` principal components.\n\n See Also\n --------\n PCA : Principal component analysis (PCA).\n IncrementalPCA : Incremental principal components analysis (IPCA).\n KernelPCA : Kernel Principal component analysis (KPCA).\n MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis.\n SparsePCA : Sparse Principal Components Analysis (SparsePCA).\n\n References\n ----------\n .. [1] A. Hyvarinen and E. Oja, Independent Component Analysis:\n Algorithms and Applications, Neural Networks, 13(4-5), 2000,\n pp. 411-430.\n\n Examples\n --------\n >>> from sklearn.datasets import load_digits\n >>> from sklearn.decomposition import FastICA\n >>> X, _ = load_digits(return_X_y=True)\n >>> transformer = FastICA(n_components=7,\n ... random_state=0,\n ... whiten='unit-variance')\n >>> X_transformed = transformer.fit_transform(X)\n >>> X_transformed.shape\n (1797, 7)\n " }, { "id": "sklearn/sklearn.decomposition._incremental_pca/IncrementalPCA", @@ -22474,8 +22337,8 @@ "sklearn/sklearn.ensemble._gb/GradientBoostingClassifier/staged_predict_proba" ], "is_public": true, - "description": "Gradient Boosting for classification.\n\nThis algorithm builds an additive model in a forward stage-wise fashion; it\nallows for the optimization of arbitrary differentiable loss functions. In\neach stage ``n_classes_`` regression trees are fit on the negative gradient\nof the loss function, e.g. binary or multiclass log loss. Binary\nclassification is a special case where only a single regression tree is\ninduced.\n\n:class:`sklearn.ensemble.HistGradientBoostingClassifier` is a much faster\nvariant of this algorithm for intermediate datasets (`n_samples >= 10_000`).\n\nRead more in the :ref:`User Guide `.", - "docstring": "Gradient Boosting for classification.\n\n This algorithm builds an additive model in a forward stage-wise fashion; it\n allows for the optimization of arbitrary differentiable loss functions. In\n each stage ``n_classes_`` regression trees are fit on the negative gradient\n of the loss function, e.g. binary or multiclass log loss. Binary\n classification is a special case where only a single regression tree is\n induced.\n\n :class:`sklearn.ensemble.HistGradientBoostingClassifier` is a much faster\n variant of this algorithm for intermediate datasets (`n_samples >= 10_000`).\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n loss : {'log_loss', 'deviance', 'exponential'}, default='log_loss'\n The loss function to be optimized. 
'log_loss' refers to binomial and\n multinomial deviance, the same as used in logistic regression.\n It is a good choice for classification with probabilistic outputs.\n For loss 'exponential', gradient boosting recovers the AdaBoost algorithm.\n\n .. deprecated:: 1.1\n The loss 'deviance' was deprecated in v1.1 and will be removed in\n version 1.3. Use `loss='log_loss'` which is equivalent.\n\n learning_rate : float, default=0.1\n Learning rate shrinks the contribution of each tree by `learning_rate`.\n There is a trade-off between learning_rate and n_estimators.\n Values must be in the range `(0.0, inf)`.\n\n n_estimators : int, default=100\n The number of boosting stages to perform. Gradient boosting\n is fairly robust to over-fitting so a large number usually\n results in better performance.\n Values must be in the range `[1, inf)`.\n\n subsample : float, default=1.0\n The fraction of samples to be used for fitting the individual base\n learners. If smaller than 1.0 this results in Stochastic Gradient\n Boosting. `subsample` interacts with the parameter `n_estimators`.\n Choosing `subsample < 1.0` leads to a reduction of variance\n and an increase in bias.\n Values must be in the range `(0.0, 1.0]`.\n\n criterion : {'friedman_mse', 'squared_error', 'mse'}, default='friedman_mse'\n The function to measure the quality of a split. Supported criteria are\n 'friedman_mse' for the mean squared error with improvement score by\n Friedman, 'squared_error' for mean squared error. The default value of\n 'friedman_mse' is generally the best as it can provide a better\n approximation in some cases.\n\n .. versionadded:: 0.18\n\n .. deprecated:: 1.0\n Criterion 'mse' was deprecated in v1.0 and will be removed in\n version 1.2. Use `criterion='squared_error'` which is equivalent.\n\n min_samples_split : int or float, default=2\n The minimum number of samples required to split an internal node:\n\n - If int, values must be in the range `[2, inf)`.\n - If float, values must be in the range `(0.0, 1.0]` and `min_samples_split`\n will be `ceil(min_samples_split * n_samples)`.\n\n .. versionchanged:: 0.18\n Added float values for fractions.\n\n min_samples_leaf : int or float, default=1\n The minimum number of samples required to be at a leaf node.\n A split point at any depth will only be considered if it leaves at\n least ``min_samples_leaf`` training samples in each of the left and\n right branches. This may have the effect of smoothing the model,\n especially in regression.\n\n - If int, values must be in the range `[1, inf)`.\n - If float, values must be in the range `(0.0, 1.0]` and `min_samples_leaf`\n will be `ceil(min_samples_leaf * n_samples)`.\n\n .. versionchanged:: 0.18\n Added float values for fractions.\n\n min_weight_fraction_leaf : float, default=0.0\n The minimum weighted fraction of the sum total of weights (of all\n the input samples) required to be at a leaf node. Samples have\n equal weight when sample_weight is not provided.\n Values must be in the range `[0.0, 0.5]`.\n\n max_depth : int, default=3\n The maximum depth of the individual regression estimators. The maximum\n depth limits the number of nodes in the tree. 
Tune this parameter\n for best performance; the best value depends on the interaction\n of the input variables.\n Values must be in the range `[1, inf)`.\n\n min_impurity_decrease : float, default=0.0\n A node will be split if this split induces a decrease of the impurity\n greater than or equal to this value.\n Values must be in the range `[0.0, inf)`.\n\n The weighted impurity decrease equation is the following::\n\n N_t / N * (impurity - N_t_R / N_t * right_impurity\n - N_t_L / N_t * left_impurity)\n\n where ``N`` is the total number of samples, ``N_t`` is the number of\n samples at the current node, ``N_t_L`` is the number of samples in the\n left child, and ``N_t_R`` is the number of samples in the right child.\n\n ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,\n if ``sample_weight`` is passed.\n\n .. versionadded:: 0.19\n\n init : estimator or 'zero', default=None\n An estimator object that is used to compute the initial predictions.\n ``init`` has to provide :meth:`fit` and :meth:`predict_proba`. If\n 'zero', the initial raw predictions are set to zero. By default, a\n ``DummyEstimator`` predicting the classes priors is used.\n\n random_state : int, RandomState instance or None, default=None\n Controls the random seed given to each Tree estimator at each\n boosting iteration.\n In addition, it controls the random permutation of the features at\n each split (see Notes for more details).\n It also controls the random splitting of the training data to obtain a\n validation set if `n_iter_no_change` is not None.\n Pass an int for reproducible output across multiple function calls.\n See :term:`Glossary `.\n\n max_features : {'auto', 'sqrt', 'log2'}, int or float, default=None\n The number of features to consider when looking for the best split:\n\n - If int, values must be in the range `[1, inf)`.\n - If float, values must be in the range `(0.0, 1.0]` and the features\n considered at each split will be `int(max_features * n_features)`.\n - If 'auto', then `max_features=sqrt(n_features)`.\n - If 'sqrt', then `max_features=sqrt(n_features)`.\n - If 'log2', then `max_features=log2(n_features)`.\n - If None, then `max_features=n_features`.\n\n Choosing `max_features < n_features` leads to a reduction of variance\n and an increase in bias.\n\n Note: the search for a split does not stop until at least one\n valid partition of the node samples is found, even if it requires to\n effectively inspect more than ``max_features`` features.\n\n verbose : int, default=0\n Enable verbose output. If 1 then it prints progress and performance\n once in a while (the more trees the lower the frequency). If greater\n than 1 then it prints progress and performance for every tree.\n Values must be in the range `[0, inf)`.\n\n max_leaf_nodes : int, default=None\n Grow trees with ``max_leaf_nodes`` in best-first fashion.\n Best nodes are defined as relative reduction in impurity.\n Values must be in the range `[2, inf)`.\n If `None`, then unlimited number of leaf nodes.\n\n warm_start : bool, default=False\n When set to ``True``, reuse the solution of the previous call to fit\n and add more estimators to the ensemble, otherwise, just erase the\n previous solution. See :term:`the Glossary `.\n\n validation_fraction : float, default=0.1\n The proportion of training data to set aside as validation set for\n early stopping. Values must be in the range `(0.0, 1.0)`.\n Only used if ``n_iter_no_change`` is set to an integer.\n\n .. 
versionadded:: 0.20\n\n n_iter_no_change : int, default=None\n ``n_iter_no_change`` is used to decide if early stopping will be used\n to terminate training when validation score is not improving. By\n default it is set to None to disable early stopping. If set to a\n number, it will set aside ``validation_fraction`` size of the training\n data as validation and terminate training when validation score is not\n improving in all of the previous ``n_iter_no_change`` numbers of\n iterations. The split is stratified.\n Values must be in the range `[1, inf)`.\n\n .. versionadded:: 0.20\n\n tol : float, default=1e-4\n Tolerance for the early stopping. When the loss is not improving\n by at least tol for ``n_iter_no_change`` iterations (if set to a\n number), the training stops.\n Values must be in the range `(0.0, inf)`.\n\n .. versionadded:: 0.20\n\n ccp_alpha : non-negative float, default=0.0\n Complexity parameter used for Minimal Cost-Complexity Pruning. The\n subtree with the largest cost complexity that is smaller than\n ``ccp_alpha`` will be chosen. By default, no pruning is performed.\n Values must be in the range `[0.0, inf)`.\n See :ref:`minimal_cost_complexity_pruning` for details.\n\n .. versionadded:: 0.22\n\n Attributes\n ----------\n n_estimators_ : int\n The number of estimators as selected by early stopping (if\n ``n_iter_no_change`` is specified). Otherwise it is set to\n ``n_estimators``.\n\n .. versionadded:: 0.20\n\n feature_importances_ : ndarray of shape (n_features,)\n The impurity-based feature importances.\n The higher, the more important the feature.\n The importance of a feature is computed as the (normalized)\n total reduction of the criterion brought by that feature. It is also\n known as the Gini importance.\n\n Warning: impurity-based feature importances can be misleading for\n high cardinality features (many unique values). See\n :func:`sklearn.inspection.permutation_importance` as an alternative.\n\n oob_improvement_ : ndarray of shape (n_estimators,)\n The improvement in loss (= deviance) on the out-of-bag samples\n relative to the previous iteration.\n ``oob_improvement_[0]`` is the improvement in\n loss of the first stage over the ``init`` estimator.\n Only available if ``subsample < 1.0``\n\n train_score_ : ndarray of shape (n_estimators,)\n The i-th score ``train_score_[i]`` is the deviance (= loss) of the\n model at iteration ``i`` on the in-bag sample.\n If ``subsample == 1`` this is the deviance on the training data.\n\n loss_ : LossFunction\n The concrete ``LossFunction`` object.\n\n .. deprecated:: 1.1\n Attribute `loss_` was deprecated in version 1.1 and will be\n removed in 1.3.\n\n init_ : estimator\n The estimator that provides the initial predictions.\n Set via the ``init`` argument or ``loss.init_estimator``.\n\n estimators_ : ndarray of DecisionTreeRegressor of shape (n_estimators, ``loss_.K``)\n The collection of fitted sub-estimators. ``loss_.K`` is 1 for binary\n classification, otherwise n_classes.\n\n classes_ : ndarray of shape (n_classes,)\n The classes labels.\n\n n_features_ : int\n The number of data features.\n\n .. deprecated:: 1.0\n Attribute `n_features_` was deprecated in version 1.0 and will be\n removed in 1.2. Use `n_features_in_` instead.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\n Names of features seen during :term:`fit`. Defined only when `X`\n has feature names that are all strings.\n\n .. 
versionadded:: 1.0\n\n n_classes_ : int\n The number of classes.\n\n max_features_ : int\n The inferred value of max_features.\n\n See Also\n --------\n HistGradientBoostingClassifier : Histogram-based Gradient Boosting\n Classification Tree.\n sklearn.tree.DecisionTreeClassifier : A decision tree classifier.\n RandomForestClassifier : A meta-estimator that fits a number of decision\n tree classifiers on various sub-samples of the dataset and uses\n averaging to improve the predictive accuracy and control over-fitting.\n AdaBoostClassifier : A meta-estimator that begins by fitting a classifier\n on the original dataset and then fits additional copies of the\n classifier on the same dataset where the weights of incorrectly\n classified instances are adjusted such that subsequent classifiers\n focus more on difficult cases.\n\n Notes\n -----\n The features are always randomly permuted at each split. Therefore,\n the best found split may vary, even with the same training data and\n ``max_features=n_features``, if the improvement of the criterion is\n identical for several splits enumerated during the search of the best\n split. To obtain a deterministic behaviour during fitting,\n ``random_state`` has to be fixed.\n\n References\n ----------\n J. Friedman, Greedy Function Approximation: A Gradient Boosting\n Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.\n\n J. Friedman, Stochastic Gradient Boosting, 1999\n\n T. Hastie, R. Tibshirani and J. Friedman.\n Elements of Statistical Learning Ed. 2, Springer, 2009.\n\n Examples\n --------\n The following example shows how to fit a gradient boosting classifier with\n 100 decision stumps as weak learners.\n\n >>> from sklearn.datasets import make_hastie_10_2\n >>> from sklearn.ensemble import GradientBoostingClassifier\n\n >>> X, y = make_hastie_10_2(random_state=0)\n >>> X_train, X_test = X[:2000], X[2000:]\n >>> y_train, y_test = y[:2000], y[2000:]\n\n >>> clf = GradientBoostingClassifier(n_estimators=100, learning_rate=1.0,\n ... max_depth=1, random_state=0).fit(X_train, y_train)\n >>> clf.score(X_test, y_test)\n 0.913...\n " + "description": "Gradient Boosting for classification.\n\nGB builds an additive model in a\nforward stage-wise fashion; it allows for the optimization of\narbitrary differentiable loss functions. In each stage ``n_classes_``\nregression trees are fit on the negative gradient of the loss function,\ne.g. binary or multiclass log loss. Binary classification\nis a special case where only a single regression tree is induced.\n\nRead more in the :ref:`User Guide `.", + "docstring": "Gradient Boosting for classification.\n\n GB builds an additive model in a\n forward stage-wise fashion; it allows for the optimization of\n arbitrary differentiable loss functions. In each stage ``n_classes_``\n regression trees are fit on the negative gradient of the loss function,\n e.g. binary or multiclass log loss. Binary classification\n is a special case where only a single regression tree is induced.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n loss : {'log_loss', 'deviance', 'exponential'}, default='log_loss'\n The loss function to be optimized. 'log_loss' refers to binomial and\n multinomial deviance, the same as used in logistic regression.\n It is a good choice for classification with probabilistic outputs.\n For loss 'exponential', gradient boosting recovers the AdaBoost algorithm.\n\n .. deprecated:: 1.1\n The loss 'deviance' was deprecated in v1.1 and will be removed in\n version 1.3. 
Use `loss='log_loss'` which is equivalent.\n\n learning_rate : float, default=0.1\n Learning rate shrinks the contribution of each tree by `learning_rate`.\n There is a trade-off between learning_rate and n_estimators.\n Values must be in the range `(0.0, inf)`.\n\n n_estimators : int, default=100\n The number of boosting stages to perform. Gradient boosting\n is fairly robust to over-fitting so a large number usually\n results in better performance.\n Values must be in the range `[1, inf)`.\n\n subsample : float, default=1.0\n The fraction of samples to be used for fitting the individual base\n learners. If smaller than 1.0 this results in Stochastic Gradient\n Boosting. `subsample` interacts with the parameter `n_estimators`.\n Choosing `subsample < 1.0` leads to a reduction of variance\n and an increase in bias.\n Values must be in the range `(0.0, 1.0]`.\n\n criterion : {'friedman_mse', 'squared_error', 'mse'}, default='friedman_mse'\n The function to measure the quality of a split. Supported criteria are\n 'friedman_mse' for the mean squared error with improvement score by\n Friedman, 'squared_error' for mean squared error. The default value of\n 'friedman_mse' is generally the best as it can provide a better\n approximation in some cases.\n\n .. versionadded:: 0.18\n\n .. deprecated:: 1.0\n Criterion 'mse' was deprecated in v1.0 and will be removed in\n version 1.2. Use `criterion='squared_error'` which is equivalent.\n\n min_samples_split : int or float, default=2\n The minimum number of samples required to split an internal node:\n\n - If int, values must be in the range `[2, inf)`.\n - If float, values must be in the range `(0.0, 1.0]` and `min_samples_split`\n will be `ceil(min_samples_split * n_samples)`.\n\n .. versionchanged:: 0.18\n Added float values for fractions.\n\n min_samples_leaf : int or float, default=1\n The minimum number of samples required to be at a leaf node.\n A split point at any depth will only be considered if it leaves at\n least ``min_samples_leaf`` training samples in each of the left and\n right branches. This may have the effect of smoothing the model,\n especially in regression.\n\n - If int, values must be in the range `[1, inf)`.\n - If float, values must be in the range `(0.0, 1.0]` and `min_samples_leaf`\n will be `ceil(min_samples_leaf * n_samples)`.\n\n .. versionchanged:: 0.18\n Added float values for fractions.\n\n min_weight_fraction_leaf : float, default=0.0\n The minimum weighted fraction of the sum total of weights (of all\n the input samples) required to be at a leaf node. Samples have\n equal weight when sample_weight is not provided.\n Values must be in the range `[0.0, 0.5]`.\n\n max_depth : int, default=3\n The maximum depth of the individual regression estimators. The maximum\n depth limits the number of nodes in the tree. 
Tune this parameter\n for best performance; the best value depends on the interaction\n of the input variables.\n Values must be in the range `[1, inf)`.\n\n min_impurity_decrease : float, default=0.0\n A node will be split if this split induces a decrease of the impurity\n greater than or equal to this value.\n Values must be in the range `[0.0, inf)`.\n\n The weighted impurity decrease equation is the following::\n\n N_t / N * (impurity - N_t_R / N_t * right_impurity\n - N_t_L / N_t * left_impurity)\n\n where ``N`` is the total number of samples, ``N_t`` is the number of\n samples at the current node, ``N_t_L`` is the number of samples in the\n left child, and ``N_t_R`` is the number of samples in the right child.\n\n ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,\n if ``sample_weight`` is passed.\n\n .. versionadded:: 0.19\n\n init : estimator or 'zero', default=None\n An estimator object that is used to compute the initial predictions.\n ``init`` has to provide :meth:`fit` and :meth:`predict_proba`. If\n 'zero', the initial raw predictions are set to zero. By default, a\n ``DummyEstimator`` predicting the classes priors is used.\n\n random_state : int, RandomState instance or None, default=None\n Controls the random seed given to each Tree estimator at each\n boosting iteration.\n In addition, it controls the random permutation of the features at\n each split (see Notes for more details).\n It also controls the random splitting of the training data to obtain a\n validation set if `n_iter_no_change` is not None.\n Pass an int for reproducible output across multiple function calls.\n See :term:`Glossary `.\n\n max_features : {'auto', 'sqrt', 'log2'}, int or float, default=None\n The number of features to consider when looking for the best split:\n\n - If int, values must be in the range `[1, inf)`.\n - If float, values must be in the range `(0.0, 1.0]` and the features\n considered at each split will be `int(max_features * n_features)`.\n - If 'auto', then `max_features=sqrt(n_features)`.\n - If 'sqrt', then `max_features=sqrt(n_features)`.\n - If 'log2', then `max_features=log2(n_features)`.\n - If None, then `max_features=n_features`.\n\n Choosing `max_features < n_features` leads to a reduction of variance\n and an increase in bias.\n\n Note: the search for a split does not stop until at least one\n valid partition of the node samples is found, even if it requires to\n effectively inspect more than ``max_features`` features.\n\n verbose : int, default=0\n Enable verbose output. If 1 then it prints progress and performance\n once in a while (the more trees the lower the frequency). If greater\n than 1 then it prints progress and performance for every tree.\n Values must be in the range `[0, inf)`.\n\n max_leaf_nodes : int, default=None\n Grow trees with ``max_leaf_nodes`` in best-first fashion.\n Best nodes are defined as relative reduction in impurity.\n Values must be in the range `[2, inf)`.\n If `None`, then unlimited number of leaf nodes.\n\n warm_start : bool, default=False\n When set to ``True``, reuse the solution of the previous call to fit\n and add more estimators to the ensemble, otherwise, just erase the\n previous solution. See :term:`the Glossary `.\n\n validation_fraction : float, default=0.1\n The proportion of training data to set aside as validation set for\n early stopping. Values must be in the range `(0.0, 1.0)`.\n Only used if ``n_iter_no_change`` is set to an integer.\n\n .. 
versionadded:: 0.20\n\n n_iter_no_change : int, default=None\n ``n_iter_no_change`` is used to decide if early stopping will be used\n to terminate training when validation score is not improving. By\n default it is set to None to disable early stopping. If set to a\n number, it will set aside ``validation_fraction`` size of the training\n data as validation and terminate training when validation score is not\n improving in all of the previous ``n_iter_no_change`` numbers of\n iterations. The split is stratified.\n Values must be in the range `[1, inf)`.\n\n .. versionadded:: 0.20\n\n tol : float, default=1e-4\n Tolerance for the early stopping. When the loss is not improving\n by at least tol for ``n_iter_no_change`` iterations (if set to a\n number), the training stops.\n Values must be in the range `(0.0, inf)`.\n\n .. versionadded:: 0.20\n\n ccp_alpha : non-negative float, default=0.0\n Complexity parameter used for Minimal Cost-Complexity Pruning. The\n subtree with the largest cost complexity that is smaller than\n ``ccp_alpha`` will be chosen. By default, no pruning is performed.\n Values must be in the range `[0.0, inf)`.\n See :ref:`minimal_cost_complexity_pruning` for details.\n\n .. versionadded:: 0.22\n\n Attributes\n ----------\n n_estimators_ : int\n The number of estimators as selected by early stopping (if\n ``n_iter_no_change`` is specified). Otherwise it is set to\n ``n_estimators``.\n\n .. versionadded:: 0.20\n\n feature_importances_ : ndarray of shape (n_features,)\n The impurity-based feature importances.\n The higher, the more important the feature.\n The importance of a feature is computed as the (normalized)\n total reduction of the criterion brought by that feature. It is also\n known as the Gini importance.\n\n Warning: impurity-based feature importances can be misleading for\n high cardinality features (many unique values). See\n :func:`sklearn.inspection.permutation_importance` as an alternative.\n\n oob_improvement_ : ndarray of shape (n_estimators,)\n The improvement in loss (= deviance) on the out-of-bag samples\n relative to the previous iteration.\n ``oob_improvement_[0]`` is the improvement in\n loss of the first stage over the ``init`` estimator.\n Only available if ``subsample < 1.0``\n\n train_score_ : ndarray of shape (n_estimators,)\n The i-th score ``train_score_[i]`` is the deviance (= loss) of the\n model at iteration ``i`` on the in-bag sample.\n If ``subsample == 1`` this is the deviance on the training data.\n\n loss_ : LossFunction\n The concrete ``LossFunction`` object.\n\n .. deprecated:: 1.1\n Attribute `loss_` was deprecated in version 1.1 and will be\n removed in 1.3.\n\n init_ : estimator\n The estimator that provides the initial predictions.\n Set via the ``init`` argument or ``loss.init_estimator``.\n\n estimators_ : ndarray of DecisionTreeRegressor of shape (n_estimators, ``loss_.K``)\n The collection of fitted sub-estimators. ``loss_.K`` is 1 for binary\n classification, otherwise n_classes.\n\n classes_ : ndarray of shape (n_classes,)\n The classes labels.\n\n n_features_ : int\n The number of data features.\n\n .. deprecated:: 1.0\n Attribute `n_features_` was deprecated in version 1.0 and will be\n removed in 1.2. Use `n_features_in_` instead.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\n Names of features seen during :term:`fit`. Defined only when `X`\n has feature names that are all strings.\n\n .. 
versionadded:: 1.0\n\n n_classes_ : int\n The number of classes.\n\n max_features_ : int\n The inferred value of max_features.\n\n See Also\n --------\n HistGradientBoostingClassifier : Histogram-based Gradient Boosting\n Classification Tree.\n sklearn.tree.DecisionTreeClassifier : A decision tree classifier.\n RandomForestClassifier : A meta-estimator that fits a number of decision\n tree classifiers on various sub-samples of the dataset and uses\n averaging to improve the predictive accuracy and control over-fitting.\n AdaBoostClassifier : A meta-estimator that begins by fitting a classifier\n on the original dataset and then fits additional copies of the\n classifier on the same dataset where the weights of incorrectly\n classified instances are adjusted such that subsequent classifiers\n focus more on difficult cases.\n\n Notes\n -----\n The features are always randomly permuted at each split. Therefore,\n the best found split may vary, even with the same training data and\n ``max_features=n_features``, if the improvement of the criterion is\n identical for several splits enumerated during the search of the best\n split. To obtain a deterministic behaviour during fitting,\n ``random_state`` has to be fixed.\n\n References\n ----------\n J. Friedman, Greedy Function Approximation: A Gradient Boosting\n Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.\n\n J. Friedman, Stochastic Gradient Boosting, 1999\n\n T. Hastie, R. Tibshirani and J. Friedman.\n Elements of Statistical Learning Ed. 2, Springer, 2009.\n\n Examples\n --------\n The following example shows how to fit a gradient boosting classifier with\n 100 decision stumps as weak learners.\n\n >>> from sklearn.datasets import make_hastie_10_2\n >>> from sklearn.ensemble import GradientBoostingClassifier\n\n >>> X, y = make_hastie_10_2(random_state=0)\n >>> X_train, X_test = X[:2000], X[2000:]\n >>> y_train, y_test = y[:2000], y[2000:]\n\n >>> clf = GradientBoostingClassifier(n_estimators=100, learning_rate=1.0,\n ... max_depth=1, random_state=0).fit(X_train, y_train)\n >>> clf.score(X_test, y_test)\n 0.913...\n " }, { "id": "sklearn/sklearn.ensemble._gb/GradientBoostingRegressor", @@ -22491,8 +22354,8 @@ "sklearn/sklearn.ensemble._gb/GradientBoostingRegressor/apply" ], "is_public": true, - "description": "Gradient Boosting for regression.\n\nThis estimator builds an additive model in a forward stage-wise fashion; it\nallows for the optimization of arbitrary differentiable loss functions. In\neach stage a regression tree is fit on the negative gradient of the given\nloss function.\n\n:class:`sklearn.ensemble.HistGradientBoostingRegressor` is a much faster\nvariant of this algorithm for intermediate datasets (`n_samples >= 10_000`).\n\nRead more in the :ref:`User Guide `.", - "docstring": "Gradient Boosting for regression.\n\n This estimator builds an additive model in a forward stage-wise fashion; it\n allows for the optimization of arbitrary differentiable loss functions. In\n each stage a regression tree is fit on the negative gradient of the given\n loss function.\n\n :class:`sklearn.ensemble.HistGradientBoostingRegressor` is a much faster\n variant of this algorithm for intermediate datasets (`n_samples >= 10_000`).\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n loss : {'squared_error', 'absolute_error', 'huber', 'quantile'}, default='squared_error'\n Loss function to be optimized. 'squared_error' refers to the squared\n error for regression. 
'absolute_error' refers to the absolute error of\n regression and is a robust loss function. 'huber' is a\n combination of the two. 'quantile' allows quantile regression (use\n `alpha` to specify the quantile).\n\n .. deprecated:: 1.0\n The loss 'ls' was deprecated in v1.0 and will be removed in\n version 1.2. Use `loss='squared_error'` which is equivalent.\n\n .. deprecated:: 1.0\n The loss 'lad' was deprecated in v1.0 and will be removed in\n version 1.2. Use `loss='absolute_error'` which is equivalent.\n\n learning_rate : float, default=0.1\n Learning rate shrinks the contribution of each tree by `learning_rate`.\n There is a trade-off between learning_rate and n_estimators.\n Values must be in the range `(0.0, inf)`.\n\n n_estimators : int, default=100\n The number of boosting stages to perform. Gradient boosting\n is fairly robust to over-fitting so a large number usually\n results in better performance.\n Values must be in the range `[1, inf)`.\n\n subsample : float, default=1.0\n The fraction of samples to be used for fitting the individual base\n learners. If smaller than 1.0 this results in Stochastic Gradient\n Boosting. `subsample` interacts with the parameter `n_estimators`.\n Choosing `subsample < 1.0` leads to a reduction of variance\n and an increase in bias.\n Values must be in the range `(0.0, 1.0]`.\n\n criterion : {'friedman_mse', 'squared_error', 'mse'}, default='friedman_mse'\n The function to measure the quality of a split. Supported criteria are\n \"friedman_mse\" for the mean squared error with improvement score by\n Friedman, \"squared_error\" for mean squared error. The default value of\n \"friedman_mse\" is generally the best as it can provide a better\n approximation in some cases.\n\n .. versionadded:: 0.18\n\n .. deprecated:: 1.0\n Criterion 'mse' was deprecated in v1.0 and will be removed in\n version 1.2. Use `criterion='squared_error'` which is equivalent.\n\n min_samples_split : int or float, default=2\n The minimum number of samples required to split an internal node:\n\n - If int, values must be in the range `[2, inf)`.\n - If float, values must be in the range `(0.0, 1.0]` and `min_samples_split`\n will be `ceil(min_samples_split * n_samples)`.\n\n .. versionchanged:: 0.18\n Added float values for fractions.\n\n min_samples_leaf : int or float, default=1\n The minimum number of samples required to be at a leaf node.\n A split point at any depth will only be considered if it leaves at\n least ``min_samples_leaf`` training samples in each of the left and\n right branches. This may have the effect of smoothing the model,\n especially in regression.\n\n - If int, values must be in the range `[1, inf)`.\n - If float, values must be in the range `(0.0, 1.0]` and `min_samples_leaf`\n will be `ceil(min_samples_leaf * n_samples)`.\n\n .. versionchanged:: 0.18\n Added float values for fractions.\n\n min_weight_fraction_leaf : float, default=0.0\n The minimum weighted fraction of the sum total of weights (of all\n the input samples) required to be at a leaf node. Samples have\n equal weight when sample_weight is not provided.\n Values must be in the range `[0.0, 0.5]`.\n\n max_depth : int, default=3\n Maximum depth of the individual regression estimators. The maximum\n depth limits the number of nodes in the tree. 
Tune this parameter\n for best performance; the best value depends on the interaction\n of the input variables.\n Values must be in the range `[1, inf)`.\n\n min_impurity_decrease : float, default=0.0\n A node will be split if this split induces a decrease of the impurity\n greater than or equal to this value.\n Values must be in the range `[0.0, inf)`.\n\n The weighted impurity decrease equation is the following::\n\n N_t / N * (impurity - N_t_R / N_t * right_impurity\n - N_t_L / N_t * left_impurity)\n\n where ``N`` is the total number of samples, ``N_t`` is the number of\n samples at the current node, ``N_t_L`` is the number of samples in the\n left child, and ``N_t_R`` is the number of samples in the right child.\n\n ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,\n if ``sample_weight`` is passed.\n\n .. versionadded:: 0.19\n\n init : estimator or 'zero', default=None\n An estimator object that is used to compute the initial predictions.\n ``init`` has to provide :term:`fit` and :term:`predict`. If 'zero', the\n initial raw predictions are set to zero. By default a\n ``DummyEstimator`` is used, predicting either the average target value\n (for loss='squared_error'), or a quantile for the other losses.\n\n random_state : int, RandomState instance or None, default=None\n Controls the random seed given to each Tree estimator at each\n boosting iteration.\n In addition, it controls the random permutation of the features at\n each split (see Notes for more details).\n It also controls the random splitting of the training data to obtain a\n validation set if `n_iter_no_change` is not None.\n Pass an int for reproducible output across multiple function calls.\n See :term:`Glossary `.\n\n max_features : {'auto', 'sqrt', 'log2'}, int or float, default=None\n The number of features to consider when looking for the best split:\n\n - If int, values must be in the range `[1, inf)`.\n - If float, values must be in the range `(0.0, 1.0]` and the features\n considered at each split will be `int(max_features * n_features)`.\n - If \"auto\", then `max_features=n_features`.\n - If \"sqrt\", then `max_features=sqrt(n_features)`.\n - If \"log2\", then `max_features=log2(n_features)`.\n - If None, then `max_features=n_features`.\n\n Choosing `max_features < n_features` leads to a reduction of variance\n and an increase in bias.\n\n Note: the search for a split does not stop until at least one\n valid partition of the node samples is found, even if it requires to\n effectively inspect more than ``max_features`` features.\n\n alpha : float, default=0.9\n The alpha-quantile of the huber loss function and the quantile\n loss function. Only if ``loss='huber'`` or ``loss='quantile'``.\n Values must be in the range `(0.0, 1.0)`.\n\n verbose : int, default=0\n Enable verbose output. If 1 then it prints progress and performance\n once in a while (the more trees the lower the frequency). If greater\n than 1 then it prints progress and performance for every tree.\n Values must be in the range `[0, inf)`.\n\n max_leaf_nodes : int, default=None\n Grow trees with ``max_leaf_nodes`` in best-first fashion.\n Best nodes are defined as relative reduction in impurity.\n Values must be in the range `[2, inf)`.\n If None, then unlimited number of leaf nodes.\n\n warm_start : bool, default=False\n When set to ``True``, reuse the solution of the previous call to fit\n and add more estimators to the ensemble, otherwise, just erase the\n previous solution. 
See :term:`the Glossary `.\n\n validation_fraction : float, default=0.1\n The proportion of training data to set aside as validation set for\n early stopping. Values must be in the range `(0.0, 1.0)`.\n Only used if ``n_iter_no_change`` is set to an integer.\n\n .. versionadded:: 0.20\n\n n_iter_no_change : int, default=None\n ``n_iter_no_change`` is used to decide if early stopping will be used\n to terminate training when validation score is not improving. By\n default it is set to None to disable early stopping. If set to a\n number, it will set aside ``validation_fraction`` size of the training\n data as validation and terminate training when validation score is not\n improving in all of the previous ``n_iter_no_change`` numbers of\n iterations.\n Values must be in the range `[1, inf)`.\n\n .. versionadded:: 0.20\n\n tol : float, default=1e-4\n Tolerance for the early stopping. When the loss is not improving\n by at least tol for ``n_iter_no_change`` iterations (if set to a\n number), the training stops.\n Values must be in the range `(0.0, inf)`.\n\n .. versionadded:: 0.20\n\n ccp_alpha : non-negative float, default=0.0\n Complexity parameter used for Minimal Cost-Complexity Pruning. The\n subtree with the largest cost complexity that is smaller than\n ``ccp_alpha`` will be chosen. By default, no pruning is performed.\n Values must be in the range `[0.0, inf)`.\n See :ref:`minimal_cost_complexity_pruning` for details.\n\n .. versionadded:: 0.22\n\n Attributes\n ----------\n feature_importances_ : ndarray of shape (n_features,)\n The impurity-based feature importances.\n The higher, the more important the feature.\n The importance of a feature is computed as the (normalized)\n total reduction of the criterion brought by that feature. It is also\n known as the Gini importance.\n\n Warning: impurity-based feature importances can be misleading for\n high cardinality features (many unique values). See\n :func:`sklearn.inspection.permutation_importance` as an alternative.\n\n oob_improvement_ : ndarray of shape (n_estimators,)\n The improvement in loss (= deviance) on the out-of-bag samples\n relative to the previous iteration.\n ``oob_improvement_[0]`` is the improvement in\n loss of the first stage over the ``init`` estimator.\n Only available if ``subsample < 1.0``\n\n train_score_ : ndarray of shape (n_estimators,)\n The i-th score ``train_score_[i]`` is the deviance (= loss) of the\n model at iteration ``i`` on the in-bag sample.\n If ``subsample == 1`` this is the deviance on the training data.\n\n loss_ : LossFunction\n The concrete ``LossFunction`` object.\n\n .. deprecated:: 1.1\n Attribute `loss_` was deprecated in version 1.1 and will be\n removed in 1.3.\n\n init_ : estimator\n The estimator that provides the initial predictions.\n Set via the ``init`` argument or ``loss.init_estimator``.\n\n estimators_ : ndarray of DecisionTreeRegressor of shape (n_estimators, 1)\n The collection of fitted sub-estimators.\n\n n_estimators_ : int\n The number of estimators as selected by early stopping (if\n ``n_iter_no_change`` is specified). Otherwise it is set to\n ``n_estimators``.\n\n n_features_ : int\n The number of data features.\n\n .. deprecated:: 1.0\n Attribute `n_features_` was deprecated in version 1.0 and will be\n removed in 1.2. Use `n_features_in_` instead.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\n Names of features seen during :term:`fit`. 
Defined only when `X`\n has feature names that are all strings.\n\n .. versionadded:: 1.0\n\n max_features_ : int\n The inferred value of max_features.\n\n See Also\n --------\n HistGradientBoostingRegressor : Histogram-based Gradient Boosting\n Classification Tree.\n sklearn.tree.DecisionTreeRegressor : A decision tree regressor.\n sklearn.ensemble.RandomForestRegressor : A random forest regressor.\n\n Notes\n -----\n The features are always randomly permuted at each split. Therefore,\n the best found split may vary, even with the same training data and\n ``max_features=n_features``, if the improvement of the criterion is\n identical for several splits enumerated during the search of the best\n split. To obtain a deterministic behaviour during fitting,\n ``random_state`` has to be fixed.\n\n References\n ----------\n J. Friedman, Greedy Function Approximation: A Gradient Boosting\n Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.\n\n J. Friedman, Stochastic Gradient Boosting, 1999\n\n T. Hastie, R. Tibshirani and J. Friedman.\n Elements of Statistical Learning Ed. 2, Springer, 2009.\n\n Examples\n --------\n >>> from sklearn.datasets import make_regression\n >>> from sklearn.ensemble import GradientBoostingRegressor\n >>> from sklearn.model_selection import train_test_split\n >>> X, y = make_regression(random_state=0)\n >>> X_train, X_test, y_train, y_test = train_test_split(\n ... X, y, random_state=0)\n >>> reg = GradientBoostingRegressor(random_state=0)\n >>> reg.fit(X_train, y_train)\n GradientBoostingRegressor(random_state=0)\n >>> reg.predict(X_test[1:2])\n array([-61...])\n >>> reg.score(X_test, y_test)\n 0.4...\n " + "description": "Gradient Boosting for regression.\n\nGB builds an additive model in a forward stage-wise fashion;\nit allows for the optimization of arbitrary differentiable loss functions.\nIn each stage a regression tree is fit on the negative gradient of the\ngiven loss function.\n\nRead more in the :ref:`User Guide `.", + "docstring": "Gradient Boosting for regression.\n\n GB builds an additive model in a forward stage-wise fashion;\n it allows for the optimization of arbitrary differentiable loss functions.\n In each stage a regression tree is fit on the negative gradient of the\n given loss function.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n loss : {'squared_error', 'absolute_error', 'huber', 'quantile'}, default='squared_error'\n Loss function to be optimized. 'squared_error' refers to the squared\n error for regression. 'absolute_error' refers to the absolute error of\n regression and is a robust loss function. 'huber' is a\n combination of the two. 'quantile' allows quantile regression (use\n `alpha` to specify the quantile).\n\n .. deprecated:: 1.0\n The loss 'ls' was deprecated in v1.0 and will be removed in\n version 1.2. Use `loss='squared_error'` which is equivalent.\n\n .. deprecated:: 1.0\n The loss 'lad' was deprecated in v1.0 and will be removed in\n version 1.2. Use `loss='absolute_error'` which is equivalent.\n\n learning_rate : float, default=0.1\n Learning rate shrinks the contribution of each tree by `learning_rate`.\n There is a trade-off between learning_rate and n_estimators.\n Values must be in the range `(0.0, inf)`.\n\n n_estimators : int, default=100\n The number of boosting stages to perform. 
Gradient boosting\n is fairly robust to over-fitting so a large number usually\n results in better performance.\n Values must be in the range `[1, inf)`.\n\n subsample : float, default=1.0\n The fraction of samples to be used for fitting the individual base\n learners. If smaller than 1.0 this results in Stochastic Gradient\n Boosting. `subsample` interacts with the parameter `n_estimators`.\n Choosing `subsample < 1.0` leads to a reduction of variance\n and an increase in bias.\n Values must be in the range `(0.0, 1.0]`.\n\n criterion : {'friedman_mse', 'squared_error', 'mse'}, default='friedman_mse'\n The function to measure the quality of a split. Supported criteria are\n \"friedman_mse\" for the mean squared error with improvement score by\n Friedman, \"squared_error\" for mean squared error. The default value of\n \"friedman_mse\" is generally the best as it can provide a better\n approximation in some cases.\n\n .. versionadded:: 0.18\n\n .. deprecated:: 1.0\n Criterion 'mse' was deprecated in v1.0 and will be removed in\n version 1.2. Use `criterion='squared_error'` which is equivalent.\n\n min_samples_split : int or float, default=2\n The minimum number of samples required to split an internal node:\n\n - If int, values must be in the range `[2, inf)`.\n - If float, values must be in the range `(0.0, 1.0]` and `min_samples_split`\n will be `ceil(min_samples_split * n_samples)`.\n\n .. versionchanged:: 0.18\n Added float values for fractions.\n\n min_samples_leaf : int or float, default=1\n The minimum number of samples required to be at a leaf node.\n A split point at any depth will only be considered if it leaves at\n least ``min_samples_leaf`` training samples in each of the left and\n right branches. This may have the effect of smoothing the model,\n especially in regression.\n\n - If int, values must be in the range `[1, inf)`.\n - If float, values must be in the range `(0.0, 1.0]` and `min_samples_leaf`\n will be `ceil(min_samples_leaf * n_samples)`.\n\n .. versionchanged:: 0.18\n Added float values for fractions.\n\n min_weight_fraction_leaf : float, default=0.0\n The minimum weighted fraction of the sum total of weights (of all\n the input samples) required to be at a leaf node. Samples have\n equal weight when sample_weight is not provided.\n Values must be in the range `[0.0, 0.5]`.\n\n max_depth : int, default=3\n Maximum depth of the individual regression estimators. The maximum\n depth limits the number of nodes in the tree. Tune this parameter\n for best performance; the best value depends on the interaction\n of the input variables.\n Values must be in the range `[1, inf)`.\n\n min_impurity_decrease : float, default=0.0\n A node will be split if this split induces a decrease of the impurity\n greater than or equal to this value.\n Values must be in the range `[0.0, inf)`.\n\n The weighted impurity decrease equation is the following::\n\n N_t / N * (impurity - N_t_R / N_t * right_impurity\n - N_t_L / N_t * left_impurity)\n\n where ``N`` is the total number of samples, ``N_t`` is the number of\n samples at the current node, ``N_t_L`` is the number of samples in the\n left child, and ``N_t_R`` is the number of samples in the right child.\n\n ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,\n if ``sample_weight`` is passed.\n\n .. versionadded:: 0.19\n\n init : estimator or 'zero', default=None\n An estimator object that is used to compute the initial predictions.\n ``init`` has to provide :term:`fit` and :term:`predict`. 
If 'zero', the\n initial raw predictions are set to zero. By default a\n ``DummyEstimator`` is used, predicting either the average target value\n (for loss='squared_error'), or a quantile for the other losses.\n\n random_state : int, RandomState instance or None, default=None\n Controls the random seed given to each Tree estimator at each\n boosting iteration.\n In addition, it controls the random permutation of the features at\n each split (see Notes for more details).\n It also controls the random splitting of the training data to obtain a\n validation set if `n_iter_no_change` is not None.\n Pass an int for reproducible output across multiple function calls.\n See :term:`Glossary `.\n\n max_features : {'auto', 'sqrt', 'log2'}, int or float, default=None\n The number of features to consider when looking for the best split:\n\n - If int, values must be in the range `[1, inf)`.\n - If float, values must be in the range `(0.0, 1.0]` and the features\n considered at each split will be `int(max_features * n_features)`.\n - If \"auto\", then `max_features=n_features`.\n - If \"sqrt\", then `max_features=sqrt(n_features)`.\n - If \"log2\", then `max_features=log2(n_features)`.\n - If None, then `max_features=n_features`.\n\n Choosing `max_features < n_features` leads to a reduction of variance\n and an increase in bias.\n\n Note: the search for a split does not stop until at least one\n valid partition of the node samples is found, even if it requires to\n effectively inspect more than ``max_features`` features.\n\n alpha : float, default=0.9\n The alpha-quantile of the huber loss function and the quantile\n loss function. Only if ``loss='huber'`` or ``loss='quantile'``.\n Values must be in the range `(0.0, 1.0)`.\n\n verbose : int, default=0\n Enable verbose output. If 1 then it prints progress and performance\n once in a while (the more trees the lower the frequency). If greater\n than 1 then it prints progress and performance for every tree.\n Values must be in the range `[0, inf)`.\n\n max_leaf_nodes : int, default=None\n Grow trees with ``max_leaf_nodes`` in best-first fashion.\n Best nodes are defined as relative reduction in impurity.\n Values must be in the range `[2, inf)`.\n If None, then unlimited number of leaf nodes.\n\n warm_start : bool, default=False\n When set to ``True``, reuse the solution of the previous call to fit\n and add more estimators to the ensemble, otherwise, just erase the\n previous solution. See :term:`the Glossary `.\n\n validation_fraction : float, default=0.1\n The proportion of training data to set aside as validation set for\n early stopping. Values must be in the range `(0.0, 1.0)`.\n Only used if ``n_iter_no_change`` is set to an integer.\n\n .. versionadded:: 0.20\n\n n_iter_no_change : int, default=None\n ``n_iter_no_change`` is used to decide if early stopping will be used\n to terminate training when validation score is not improving. By\n default it is set to None to disable early stopping. If set to a\n number, it will set aside ``validation_fraction`` size of the training\n data as validation and terminate training when validation score is not\n improving in all of the previous ``n_iter_no_change`` numbers of\n iterations.\n Values must be in the range `[1, inf)`.\n\n .. versionadded:: 0.20\n\n tol : float, default=1e-4\n Tolerance for the early stopping. When the loss is not improving\n by at least tol for ``n_iter_no_change`` iterations (if set to a\n number), the training stops.\n Values must be in the range `(0.0, inf)`.\n\n .. 
versionadded:: 0.20\n\n ccp_alpha : non-negative float, default=0.0\n Complexity parameter used for Minimal Cost-Complexity Pruning. The\n subtree with the largest cost complexity that is smaller than\n ``ccp_alpha`` will be chosen. By default, no pruning is performed.\n Values must be in the range `[0.0, inf)`.\n See :ref:`minimal_cost_complexity_pruning` for details.\n\n .. versionadded:: 0.22\n\n Attributes\n ----------\n feature_importances_ : ndarray of shape (n_features,)\n The impurity-based feature importances.\n The higher, the more important the feature.\n The importance of a feature is computed as the (normalized)\n total reduction of the criterion brought by that feature. It is also\n known as the Gini importance.\n\n Warning: impurity-based feature importances can be misleading for\n high cardinality features (many unique values). See\n :func:`sklearn.inspection.permutation_importance` as an alternative.\n\n oob_improvement_ : ndarray of shape (n_estimators,)\n The improvement in loss (= deviance) on the out-of-bag samples\n relative to the previous iteration.\n ``oob_improvement_[0]`` is the improvement in\n loss of the first stage over the ``init`` estimator.\n Only available if ``subsample < 1.0``\n\n train_score_ : ndarray of shape (n_estimators,)\n The i-th score ``train_score_[i]`` is the deviance (= loss) of the\n model at iteration ``i`` on the in-bag sample.\n If ``subsample == 1`` this is the deviance on the training data.\n\n loss_ : LossFunction\n The concrete ``LossFunction`` object.\n\n .. deprecated:: 1.1\n Attribute `loss_` was deprecated in version 1.1 and will be\n removed in 1.3.\n\n init_ : estimator\n The estimator that provides the initial predictions.\n Set via the ``init`` argument or ``loss.init_estimator``.\n\n estimators_ : ndarray of DecisionTreeRegressor of shape (n_estimators, 1)\n The collection of fitted sub-estimators.\n\n n_estimators_ : int\n The number of estimators as selected by early stopping (if\n ``n_iter_no_change`` is specified). Otherwise it is set to\n ``n_estimators``.\n\n n_features_ : int\n The number of data features.\n\n .. deprecated:: 1.0\n Attribute `n_features_` was deprecated in version 1.0 and will be\n removed in 1.2. Use `n_features_in_` instead.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\n Names of features seen during :term:`fit`. Defined only when `X`\n has feature names that are all strings.\n\n .. versionadded:: 1.0\n\n max_features_ : int\n The inferred value of max_features.\n\n See Also\n --------\n HistGradientBoostingRegressor : Histogram-based Gradient Boosting\n Classification Tree.\n sklearn.tree.DecisionTreeRegressor : A decision tree regressor.\n sklearn.ensemble.RandomForestRegressor : A random forest regressor.\n\n Notes\n -----\n The features are always randomly permuted at each split. Therefore,\n the best found split may vary, even with the same training data and\n ``max_features=n_features``, if the improvement of the criterion is\n identical for several splits enumerated during the search of the best\n split. To obtain a deterministic behaviour during fitting,\n ``random_state`` has to be fixed.\n\n References\n ----------\n J. Friedman, Greedy Function Approximation: A Gradient Boosting\n Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.\n\n J. Friedman, Stochastic Gradient Boosting, 1999\n\n T. Hastie, R. Tibshirani and J. Friedman.\n Elements of Statistical Learning Ed. 
2, Springer, 2009.\n\n Examples\n --------\n >>> from sklearn.datasets import make_regression\n >>> from sklearn.ensemble import GradientBoostingRegressor\n >>> from sklearn.model_selection import train_test_split\n >>> X, y = make_regression(random_state=0)\n >>> X_train, X_test, y_train, y_test = train_test_split(\n ... X, y, random_state=0)\n >>> reg = GradientBoostingRegressor(random_state=0)\n >>> reg.fit(X_train, y_train)\n GradientBoostingRegressor(random_state=0)\n >>> reg.predict(X_test[1:2])\n array([-61...])\n >>> reg.score(X_test, y_test)\n 0.4...\n " }, { "id": "sklearn/sklearn.ensemble._gb/VerboseReporter", @@ -22846,7 +22709,7 @@ ], "is_public": true, "description": "Isolation Forest Algorithm.\n\nReturn the anomaly score of each sample using the IsolationForest algorithm\n\nThe IsolationForest 'isolates' observations by randomly selecting a feature\nand then randomly selecting a split value between the maximum and minimum\nvalues of the selected feature.\n\nSince recursive partitioning can be represented by a tree structure, the\nnumber of splittings required to isolate a sample is equivalent to the path\nlength from the root node to the terminating node.\n\nThis path length, averaged over a forest of such random trees, is a\nmeasure of normality and our decision function.\n\nRandom partitioning produces noticeably shorter paths for anomalies.\nHence, when a forest of random trees collectively produce shorter path\nlengths for particular samples, they are highly likely to be anomalies.\n\nRead more in the :ref:`User Guide `.\n\n.. versionadded:: 0.18", - "docstring": "\n Isolation Forest Algorithm.\n\n Return the anomaly score of each sample using the IsolationForest algorithm\n\n The IsolationForest 'isolates' observations by randomly selecting a feature\n and then randomly selecting a split value between the maximum and minimum\n values of the selected feature.\n\n Since recursive partitioning can be represented by a tree structure, the\n number of splittings required to isolate a sample is equivalent to the path\n length from the root node to the terminating node.\n\n This path length, averaged over a forest of such random trees, is a\n measure of normality and our decision function.\n\n Random partitioning produces noticeably shorter paths for anomalies.\n Hence, when a forest of random trees collectively produce shorter path\n lengths for particular samples, they are highly likely to be anomalies.\n\n Read more in the :ref:`User Guide `.\n\n .. versionadded:: 0.18\n\n Parameters\n ----------\n n_estimators : int, default=100\n The number of base estimators in the ensemble.\n\n max_samples : \"auto\", int or float, default=\"auto\"\n The number of samples to draw from X to train each base estimator.\n - If int, then draw `max_samples` samples.\n - If float, then draw `max_samples * X.shape[0]` samples.\n - If \"auto\", then `max_samples=min(256, n_samples)`.\n\n If max_samples is larger than the number of samples provided,\n all samples will be used for all trees (no sampling).\n\n contamination : 'auto' or float, default='auto'\n The amount of contamination of the data set, i.e. the proportion\n of outliers in the data set. Used when fitting to define the threshold\n on the scores of the samples.\n\n - If 'auto', the threshold is determined as in the\n original paper.\n - If float, the contamination should be in the range (0, 0.5].\n\n .. 
versionchanged:: 0.22\n The default value of ``contamination`` changed from 0.1\n to ``'auto'``.\n\n max_features : int or float, default=1.0\n The number of features to draw from X to train each base estimator.\n\n - If int, then draw `max_features` features.\n - If float, then draw `max_features * X.shape[1]` features.\n\n Note: using a float number less than 1.0 or integer less than number of\n features will enable feature subsampling and leads to a longerr runtime.\n\n bootstrap : bool, default=False\n If True, individual trees are fit on random subsets of the training\n data sampled with replacement. If False, sampling without replacement\n is performed.\n\n n_jobs : int, default=None\n The number of jobs to run in parallel for both :meth:`fit` and\n :meth:`predict`. ``None`` means 1 unless in a\n :obj:`joblib.parallel_backend` context. ``-1`` means using all\n processors. See :term:`Glossary ` for more details.\n\n random_state : int, RandomState instance or None, default=None\n Controls the pseudo-randomness of the selection of the feature\n and split values for each branching step and each tree in the forest.\n\n Pass an int for reproducible results across multiple function calls.\n See :term:`Glossary `.\n\n verbose : int, default=0\n Controls the verbosity of the tree building process.\n\n warm_start : bool, default=False\n When set to ``True``, reuse the solution of the previous call to fit\n and add more estimators to the ensemble, otherwise, just fit a whole\n new forest. See :term:`the Glossary `.\n\n .. versionadded:: 0.21\n\n Attributes\n ----------\n base_estimator_ : ExtraTreeRegressor instance\n The child estimator template used to create the collection of\n fitted sub-estimators.\n\n estimators_ : list of ExtraTreeRegressor instances\n The collection of fitted sub-estimators.\n\n estimators_features_ : list of ndarray\n The subset of drawn features for each base estimator.\n\n estimators_samples_ : list of ndarray\n The subset of drawn samples (i.e., the in-bag samples) for each base\n estimator.\n\n max_samples_ : int\n The actual number of samples.\n\n offset_ : float\n Offset used to define the decision function from the raw scores. We\n have the relation: ``decision_function = score_samples - offset_``.\n ``offset_`` is defined as follows. When the contamination parameter is\n set to \"auto\", the offset is equal to -0.5 as the scores of inliers are\n close to 0 and the scores of outliers are close to -1. When a\n contamination parameter different than \"auto\" is provided, the offset\n is defined in such a way we obtain the expected number of outliers\n (samples with decision function < 0) in training.\n\n .. versionadded:: 0.20\n\n n_features_ : int\n The number of features when ``fit`` is performed.\n\n .. deprecated:: 1.0\n Attribute `n_features_` was deprecated in version 1.0 and will be\n removed in 1.2. Use `n_features_in_` instead.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\n Names of features seen during :term:`fit`. Defined only when `X`\n has feature names that are all strings.\n\n .. 
versionadded:: 1.0\n\n See Also\n --------\n sklearn.covariance.EllipticEnvelope : An object for detecting outliers in a\n Gaussian distributed dataset.\n sklearn.svm.OneClassSVM : Unsupervised Outlier Detection.\n Estimate the support of a high-dimensional distribution.\n The implementation is based on libsvm.\n sklearn.neighbors.LocalOutlierFactor : Unsupervised Outlier Detection\n using Local Outlier Factor (LOF).\n\n Notes\n -----\n The implementation is based on an ensemble of ExtraTreeRegressor. The\n maximum depth of each tree is set to ``ceil(log_2(n))`` where\n :math:`n` is the number of samples used to build the tree\n (see (Liu et al., 2008) for more details).\n\n References\n ----------\n .. [1] Liu, Fei Tony, Ting, Kai Ming and Zhou, Zhi-Hua. \"Isolation forest.\"\n Data Mining, 2008. ICDM'08. Eighth IEEE International Conference on.\n .. [2] Liu, Fei Tony, Ting, Kai Ming and Zhou, Zhi-Hua. \"Isolation-based\n anomaly detection.\" ACM Transactions on Knowledge Discovery from\n Data (TKDD) 6.1 (2012): 3.\n\n Examples\n --------\n >>> from sklearn.ensemble import IsolationForest\n >>> X = [[-1.1], [0.3], [0.5], [100]]\n >>> clf = IsolationForest(random_state=0).fit(X)\n >>> clf.predict([[0.1], [0], [90]])\n array([ 1, 1, -1])\n " + "docstring": "\n Isolation Forest Algorithm.\n\n Return the anomaly score of each sample using the IsolationForest algorithm\n\n The IsolationForest 'isolates' observations by randomly selecting a feature\n and then randomly selecting a split value between the maximum and minimum\n values of the selected feature.\n\n Since recursive partitioning can be represented by a tree structure, the\n number of splittings required to isolate a sample is equivalent to the path\n length from the root node to the terminating node.\n\n This path length, averaged over a forest of such random trees, is a\n measure of normality and our decision function.\n\n Random partitioning produces noticeably shorter paths for anomalies.\n Hence, when a forest of random trees collectively produce shorter path\n lengths for particular samples, they are highly likely to be anomalies.\n\n Read more in the :ref:`User Guide `.\n\n .. versionadded:: 0.18\n\n Parameters\n ----------\n n_estimators : int, default=100\n The number of base estimators in the ensemble.\n\n max_samples : \"auto\", int or float, default=\"auto\"\n The number of samples to draw from X to train each base estimator.\n - If int, then draw `max_samples` samples.\n - If float, then draw `max_samples * X.shape[0]` samples.\n - If \"auto\", then `max_samples=min(256, n_samples)`.\n\n If max_samples is larger than the number of samples provided,\n all samples will be used for all trees (no sampling).\n\n contamination : 'auto' or float, default='auto'\n The amount of contamination of the data set, i.e. the proportion\n of outliers in the data set. Used when fitting to define the threshold\n on the scores of the samples.\n\n - If 'auto', the threshold is determined as in the\n original paper.\n - If float, the contamination should be in the range (0, 0.5].\n\n .. versionchanged:: 0.22\n The default value of ``contamination`` changed from 0.1\n to ``'auto'``.\n\n max_features : int or float, default=1.0\n The number of features to draw from X to train each base estimator.\n\n - If int, then draw `max_features` features.\n - If float, then draw `max_features * X.shape[1]` features.\n\n bootstrap : bool, default=False\n If True, individual trees are fit on random subsets of the training\n data sampled with replacement. 
If False, sampling without replacement\n is performed.\n\n n_jobs : int, default=None\n The number of jobs to run in parallel for both :meth:`fit` and\n :meth:`predict`. ``None`` means 1 unless in a\n :obj:`joblib.parallel_backend` context. ``-1`` means using all\n processors. See :term:`Glossary ` for more details.\n\n random_state : int, RandomState instance or None, default=None\n Controls the pseudo-randomness of the selection of the feature\n and split values for each branching step and each tree in the forest.\n\n Pass an int for reproducible results across multiple function calls.\n See :term:`Glossary `.\n\n verbose : int, default=0\n Controls the verbosity of the tree building process.\n\n warm_start : bool, default=False\n When set to ``True``, reuse the solution of the previous call to fit\n and add more estimators to the ensemble, otherwise, just fit a whole\n new forest. See :term:`the Glossary `.\n\n .. versionadded:: 0.21\n\n Attributes\n ----------\n base_estimator_ : ExtraTreeRegressor instance\n The child estimator template used to create the collection of\n fitted sub-estimators.\n\n estimators_ : list of ExtraTreeRegressor instances\n The collection of fitted sub-estimators.\n\n estimators_features_ : list of ndarray\n The subset of drawn features for each base estimator.\n\n estimators_samples_ : list of ndarray\n The subset of drawn samples (i.e., the in-bag samples) for each base\n estimator.\n\n max_samples_ : int\n The actual number of samples.\n\n offset_ : float\n Offset used to define the decision function from the raw scores. We\n have the relation: ``decision_function = score_samples - offset_``.\n ``offset_`` is defined as follows. When the contamination parameter is\n set to \"auto\", the offset is equal to -0.5 as the scores of inliers are\n close to 0 and the scores of outliers are close to -1. When a\n contamination parameter different than \"auto\" is provided, the offset\n is defined in such a way we obtain the expected number of outliers\n (samples with decision function < 0) in training.\n\n .. versionadded:: 0.20\n\n n_features_ : int\n The number of features when ``fit`` is performed.\n\n .. deprecated:: 1.0\n Attribute `n_features_` was deprecated in version 1.0 and will be\n removed in 1.2. Use `n_features_in_` instead.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\n Names of features seen during :term:`fit`. Defined only when `X`\n has feature names that are all strings.\n\n .. versionadded:: 1.0\n\n See Also\n --------\n sklearn.covariance.EllipticEnvelope : An object for detecting outliers in a\n Gaussian distributed dataset.\n sklearn.svm.OneClassSVM : Unsupervised Outlier Detection.\n Estimate the support of a high-dimensional distribution.\n The implementation is based on libsvm.\n sklearn.neighbors.LocalOutlierFactor : Unsupervised Outlier Detection\n using Local Outlier Factor (LOF).\n\n Notes\n -----\n The implementation is based on an ensemble of ExtraTreeRegressor. The\n maximum depth of each tree is set to ``ceil(log_2(n))`` where\n :math:`n` is the number of samples used to build the tree\n (see (Liu et al., 2008) for more details).\n\n References\n ----------\n .. [1] Liu, Fei Tony, Ting, Kai Ming and Zhou, Zhi-Hua. \"Isolation forest.\"\n Data Mining, 2008. ICDM'08. Eighth IEEE International Conference on.\n .. [2] Liu, Fei Tony, Ting, Kai Ming and Zhou, Zhi-Hua. 
\"Isolation-based\n anomaly detection.\" ACM Transactions on Knowledge Discovery from\n Data (TKDD) 6.1 (2012): 3.\n\n Examples\n --------\n >>> from sklearn.ensemble import IsolationForest\n >>> X = [[-1.1], [0.3], [0.5], [100]]\n >>> clf = IsolationForest(random_state=0).fit(X)\n >>> clf.predict([[0.1], [0], [90]])\n array([ 1, 1, -1])\n " }, { "id": "sklearn/sklearn.ensemble._stacking/StackingClassifier", @@ -24627,7 +24490,6 @@ "superclasses": ["RegressorMixin", "LinearModel"], "methods": [ "sklearn/sklearn.linear_model._bayes/BayesianRidge/__init__", - "sklearn/sklearn.linear_model._bayes/BayesianRidge/_check_params", "sklearn/sklearn.linear_model._bayes/BayesianRidge/fit", "sklearn/sklearn.linear_model._bayes/BayesianRidge/predict", "sklearn/sklearn.linear_model._bayes/BayesianRidge/_update_coef_", @@ -26627,7 +26489,7 @@ ], "is_public": true, "description": "Kernel Density Estimation.\n\nRead more in the :ref:`User Guide `.", - "docstring": "Kernel Density Estimation.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n bandwidth : float or {\"scott\", \"silverman\"}, default=1.0\n The bandwidth of the kernel. If bandwidth is a float, it defines the\n bandwidth of the kernel. If bandwidth is a string, one of the estimation\n methods is implemented.\n\n algorithm : {'kd_tree', 'ball_tree', 'auto'}, default='auto'\n The tree algorithm to use.\n\n kernel : {'gaussian', 'tophat', 'epanechnikov', 'exponential', 'linear', 'cosine'}, default='gaussian'\n The kernel to use.\n\n metric : str, default='euclidean'\n The distance metric to use. Note that not all metrics are\n valid with all algorithms. Refer to the documentation of\n :class:`BallTree` and :class:`KDTree` for a description of\n available algorithms. Note that the normalization of the density\n output is correct only for the Euclidean distance metric. Default\n is 'euclidean'.\n\n atol : float, default=0\n The desired absolute tolerance of the result. A larger tolerance will\n generally lead to faster execution.\n\n rtol : float, default=0\n The desired relative tolerance of the result. A larger tolerance will\n generally lead to faster execution.\n\n breadth_first : bool, default=True\n If true (default), use a breadth-first approach to the problem.\n Otherwise use a depth-first approach.\n\n leaf_size : int, default=40\n Specify the leaf size of the underlying tree. See :class:`BallTree`\n or :class:`KDTree` for details.\n\n metric_params : dict, default=None\n Additional parameters to be passed to the tree for use with the\n metric. For more information, see the documentation of\n :class:`BallTree` or :class:`KDTree`.\n\n Attributes\n ----------\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n tree_ : ``BinaryTree`` instance\n The tree algorithm for fast generalized N-point problems.\n\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\n Names of features seen during :term:`fit`. Defined only when `X`\n has feature names that are all strings.\n\n bandwidth_ : float\n Value of the bandwidth, given directly by the bandwidth parameter or\n estimated using the 'scott' or 'silvermann' method.\n\n .. 
versionadded:: 1.0\n\n See Also\n --------\n sklearn.neighbors.KDTree : K-dimensional tree for fast generalized N-point\n problems.\n sklearn.neighbors.BallTree : Ball tree for fast generalized N-point\n problems.\n\n Examples\n --------\n Compute a gaussian kernel density estimate with a fixed bandwidth.\n\n >>> from sklearn.neighbors import KernelDensity\n >>> import numpy as np\n >>> rng = np.random.RandomState(42)\n >>> X = rng.random_sample((100, 3))\n >>> kde = KernelDensity(kernel='gaussian', bandwidth=0.5).fit(X)\n >>> log_density = kde.score_samples(X[:3])\n >>> log_density\n array([-1.52955942, -1.51462041, -1.60244657])\n " + "docstring": "Kernel Density Estimation.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n bandwidth : float, default=1.0\n The bandwidth of the kernel.\n\n algorithm : {'kd_tree', 'ball_tree', 'auto'}, default='auto'\n The tree algorithm to use.\n\n kernel : {'gaussian', 'tophat', 'epanechnikov', 'exponential', 'linear', 'cosine'}, default='gaussian'\n The kernel to use.\n\n metric : str, default='euclidean'\n The distance metric to use. Note that not all metrics are\n valid with all algorithms. Refer to the documentation of\n :class:`BallTree` and :class:`KDTree` for a description of\n available algorithms. Note that the normalization of the density\n output is correct only for the Euclidean distance metric. Default\n is 'euclidean'.\n\n atol : float, default=0\n The desired absolute tolerance of the result. A larger tolerance will\n generally lead to faster execution.\n\n rtol : float, default=0\n The desired relative tolerance of the result. A larger tolerance will\n generally lead to faster execution.\n\n breadth_first : bool, default=True\n If true (default), use a breadth-first approach to the problem.\n Otherwise use a depth-first approach.\n\n leaf_size : int, default=40\n Specify the leaf size of the underlying tree. See :class:`BallTree`\n or :class:`KDTree` for details.\n\n metric_params : dict, default=None\n Additional parameters to be passed to the tree for use with the\n metric. For more information, see the documentation of\n :class:`BallTree` or :class:`KDTree`.\n\n Attributes\n ----------\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n tree_ : ``BinaryTree`` instance\n The tree algorithm for fast generalized N-point problems.\n\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\n Names of features seen during :term:`fit`. Defined only when `X`\n has feature names that are all strings.\n\n .. 
versionadded:: 1.0\n\n See Also\n --------\n sklearn.neighbors.KDTree : K-dimensional tree for fast generalized N-point\n problems.\n sklearn.neighbors.BallTree : Ball tree for fast generalized N-point\n problems.\n\n Examples\n --------\n Compute a gaussian kernel density estimate with a fixed bandwidth.\n\n >>> from sklearn.neighbors import KernelDensity\n >>> import numpy as np\n >>> rng = np.random.RandomState(42)\n >>> X = rng.random_sample((100, 3))\n >>> kde = KernelDensity(kernel='gaussian', bandwidth=0.5).fit(X)\n >>> log_density = kde.score_samples(X[:3])\n >>> log_density\n array([-1.52955942, -1.51462041, -1.60244657])\n " }, { "id": "sklearn/sklearn.neighbors._lof/LocalOutlierFactor", @@ -27470,8 +27332,7 @@ "sklearn/sklearn.svm._base/BaseSVC/_sparse_predict_proba", "sklearn/sklearn.svm._base/BaseSVC/_get_coef", "sklearn/sklearn.svm._base/BaseSVC/probA_@getter", - "sklearn/sklearn.svm._base/BaseSVC/probB_@getter", - "sklearn/sklearn.svm._base/BaseSVC/_class_weight@getter" + "sklearn/sklearn.svm._base/BaseSVC/probB_@getter" ], "is_public": false, "description": "ABC for LibSVM-based classifiers.", @@ -27524,14 +27385,10 @@ "qname": "sklearn.svm._classes.NuSVR", "decorators": [], "superclasses": ["RegressorMixin", "BaseLibSVM"], - "methods": [ - "sklearn/sklearn.svm._classes/NuSVR/__init__", - "sklearn/sklearn.svm._classes/NuSVR/class_weight_@getter", - "sklearn/sklearn.svm._classes/NuSVR/_more_tags" - ], + "methods": ["sklearn/sklearn.svm._classes/NuSVR/__init__", "sklearn/sklearn.svm._classes/NuSVR/_more_tags"], "is_public": true, "description": "Nu Support Vector Regression.\n\nSimilar to NuSVC, for regression, uses a parameter nu to control\nthe number of support vectors. However, unlike NuSVC, where nu\nreplaces C, here nu replaces the parameter epsilon of epsilon-SVR.\n\nThe implementation is based on libsvm.\n\nRead more in the :ref:`User Guide `.", - "docstring": "Nu Support Vector Regression.\n\n Similar to NuSVC, for regression, uses a parameter nu to control\n the number of support vectors. However, unlike NuSVC, where nu\n replaces C, here nu replaces the parameter epsilon of epsilon-SVR.\n\n The implementation is based on libsvm.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n nu : float, default=0.5\n An upper bound on the fraction of training errors and a lower bound of\n the fraction of support vectors. Should be in the interval (0, 1]. By\n default 0.5 will be taken.\n\n C : float, default=1.0\n Penalty parameter C of the error term.\n\n kernel : {'linear', 'poly', 'rbf', 'sigmoid', 'precomputed'} or callable, default='rbf'\n Specifies the kernel type to be used in the algorithm.\n If none is given, 'rbf' will be used. If a callable is given it is\n used to precompute the kernel matrix.\n\n degree : int, default=3\n Degree of the polynomial kernel function ('poly').\n Ignored by all other kernels.\n\n gamma : {'scale', 'auto'} or float, default='scale'\n Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.\n\n - if ``gamma='scale'`` (default) is passed then it uses\n 1 / (n_features * X.var()) as value of gamma,\n - if 'auto', uses 1 / n_features.\n\n .. 
versionchanged:: 0.22\n The default value of ``gamma`` changed from 'auto' to 'scale'.\n\n coef0 : float, default=0.0\n Independent term in kernel function.\n It is only significant in 'poly' and 'sigmoid'.\n\n shrinking : bool, default=True\n Whether to use the shrinking heuristic.\n See the :ref:`User Guide `.\n\n tol : float, default=1e-3\n Tolerance for stopping criterion.\n\n cache_size : float, default=200\n Specify the size of the kernel cache (in MB).\n\n verbose : bool, default=False\n Enable verbose output. Note that this setting takes advantage of a\n per-process runtime setting in libsvm that, if enabled, may not work\n properly in a multithreaded context.\n\n max_iter : int, default=-1\n Hard limit on iterations within solver, or -1 for no limit.\n\n Attributes\n ----------\n class_weight_ : ndarray of shape (n_classes,)\n Multipliers of parameter C for each class.\n Computed based on the ``class_weight`` parameter.\n\n .. deprecated:: 1.2\n `class_weight_` was deprecated in version 1.2 and will be removed in 1.4.\n\n coef_ : ndarray of shape (1, n_features)\n Weights assigned to the features (coefficients in the primal\n problem). This is only available in the case of a linear kernel.\n\n `coef_` is readonly property derived from `dual_coef_` and\n `support_vectors_`.\n\n dual_coef_ : ndarray of shape (1, n_SV)\n Coefficients of the support vector in the decision function.\n\n fit_status_ : int\n 0 if correctly fitted, 1 otherwise (will raise warning)\n\n intercept_ : ndarray of shape (1,)\n Constants in decision function.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\n Names of features seen during :term:`fit`. Defined only when `X`\n has feature names that are all strings.\n\n .. versionadded:: 1.0\n\n n_iter_ : int\n Number of iterations run by the optimization routine to fit the model.\n\n .. versionadded:: 1.1\n\n n_support_ : ndarray of shape (n_classes,), dtype=int32\n Number of support vectors for each class.\n\n shape_fit_ : tuple of int of shape (n_dimensions_of_X,)\n Array dimensions of training vector ``X``.\n\n support_ : ndarray of shape (n_SV,)\n Indices of support vectors.\n\n support_vectors_ : ndarray of shape (n_SV, n_features)\n Support vectors.\n\n See Also\n --------\n NuSVC : Support Vector Machine for classification implemented with libsvm\n with a parameter to control the number of support vectors.\n\n SVR : Epsilon Support Vector Machine for regression implemented with\n libsvm.\n\n References\n ----------\n .. [1] `LIBSVM: A Library for Support Vector Machines\n `_\n\n .. [2] `Platt, John (1999). \"Probabilistic outputs for support vector\n machines and comparison to regularizedlikelihood methods.\"\n `_\n\n Examples\n --------\n >>> from sklearn.svm import NuSVR\n >>> from sklearn.pipeline import make_pipeline\n >>> from sklearn.preprocessing import StandardScaler\n >>> import numpy as np\n >>> n_samples, n_features = 10, 5\n >>> np.random.seed(0)\n >>> y = np.random.randn(n_samples)\n >>> X = np.random.randn(n_samples, n_features)\n >>> regr = make_pipeline(StandardScaler(), NuSVR(C=1.0, nu=0.1))\n >>> regr.fit(X, y)\n Pipeline(steps=[('standardscaler', StandardScaler()),\n ('nusvr', NuSVR(nu=0.1))])\n " + "docstring": "Nu Support Vector Regression.\n\n Similar to NuSVC, for regression, uses a parameter nu to control\n the number of support vectors. 
However, unlike NuSVC, where nu\n replaces C, here nu replaces the parameter epsilon of epsilon-SVR.\n\n The implementation is based on libsvm.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n nu : float, default=0.5\n An upper bound on the fraction of training errors and a lower bound of\n the fraction of support vectors. Should be in the interval (0, 1]. By\n default 0.5 will be taken.\n\n C : float, default=1.0\n Penalty parameter C of the error term.\n\n kernel : {'linear', 'poly', 'rbf', 'sigmoid', 'precomputed'} or callable, default='rbf'\n Specifies the kernel type to be used in the algorithm.\n If none is given, 'rbf' will be used. If a callable is given it is\n used to precompute the kernel matrix.\n\n degree : int, default=3\n Degree of the polynomial kernel function ('poly').\n Ignored by all other kernels.\n\n gamma : {'scale', 'auto'} or float, default='scale'\n Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.\n\n - if ``gamma='scale'`` (default) is passed then it uses\n 1 / (n_features * X.var()) as value of gamma,\n - if 'auto', uses 1 / n_features.\n\n .. versionchanged:: 0.22\n The default value of ``gamma`` changed from 'auto' to 'scale'.\n\n coef0 : float, default=0.0\n Independent term in kernel function.\n It is only significant in 'poly' and 'sigmoid'.\n\n shrinking : bool, default=True\n Whether to use the shrinking heuristic.\n See the :ref:`User Guide `.\n\n tol : float, default=1e-3\n Tolerance for stopping criterion.\n\n cache_size : float, default=200\n Specify the size of the kernel cache (in MB).\n\n verbose : bool, default=False\n Enable verbose output. Note that this setting takes advantage of a\n per-process runtime setting in libsvm that, if enabled, may not work\n properly in a multithreaded context.\n\n max_iter : int, default=-1\n Hard limit on iterations within solver, or -1 for no limit.\n\n Attributes\n ----------\n class_weight_ : ndarray of shape (n_classes,)\n Multipliers of parameter C for each class.\n Computed based on the ``class_weight`` parameter.\n\n coef_ : ndarray of shape (1, n_features)\n Weights assigned to the features (coefficients in the primal\n problem). This is only available in the case of a linear kernel.\n\n `coef_` is readonly property derived from `dual_coef_` and\n `support_vectors_`.\n\n dual_coef_ : ndarray of shape (1, n_SV)\n Coefficients of the support vector in the decision function.\n\n fit_status_ : int\n 0 if correctly fitted, 1 otherwise (will raise warning)\n\n intercept_ : ndarray of shape (1,)\n Constants in decision function.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\n Names of features seen during :term:`fit`. Defined only when `X`\n has feature names that are all strings.\n\n .. versionadded:: 1.0\n\n n_iter_ : int\n Number of iterations run by the optimization routine to fit the model.\n\n .. 
versionadded:: 1.1\n\n n_support_ : ndarray of shape (n_classes,), dtype=int32\n Number of support vectors for each class.\n\n shape_fit_ : tuple of int of shape (n_dimensions_of_X,)\n Array dimensions of training vector ``X``.\n\n support_ : ndarray of shape (n_SV,)\n Indices of support vectors.\n\n support_vectors_ : ndarray of shape (n_SV, n_features)\n Support vectors.\n\n See Also\n --------\n NuSVC : Support Vector Machine for classification implemented with libsvm\n with a parameter to control the number of support vectors.\n\n SVR : Epsilon Support Vector Machine for regression implemented with\n libsvm.\n\n References\n ----------\n .. [1] `LIBSVM: A Library for Support Vector Machines\n `_\n\n .. [2] `Platt, John (1999). \"Probabilistic outputs for support vector\n machines and comparison to regularizedlikelihood methods.\"\n `_\n\n Examples\n --------\n >>> from sklearn.svm import NuSVR\n >>> from sklearn.pipeline import make_pipeline\n >>> from sklearn.preprocessing import StandardScaler\n >>> import numpy as np\n >>> n_samples, n_features = 10, 5\n >>> np.random.seed(0)\n >>> y = np.random.randn(n_samples)\n >>> X = np.random.randn(n_samples, n_features)\n >>> regr = make_pipeline(StandardScaler(), NuSVR(C=1.0, nu=0.1))\n >>> regr.fit(X, y)\n Pipeline(steps=[('standardscaler', StandardScaler()),\n ('nusvr', NuSVR(nu=0.1))])\n " }, { "id": "sklearn/sklearn.svm._classes/OneClassSVM", @@ -27541,7 +27398,6 @@ "superclasses": ["OutlierMixin", "BaseLibSVM"], "methods": [ "sklearn/sklearn.svm._classes/OneClassSVM/__init__", - "sklearn/sklearn.svm._classes/OneClassSVM/class_weight_@getter", "sklearn/sklearn.svm._classes/OneClassSVM/fit", "sklearn/sklearn.svm._classes/OneClassSVM/decision_function", "sklearn/sklearn.svm._classes/OneClassSVM/score_samples", @@ -27550,7 +27406,7 @@ ], "is_public": true, "description": "Unsupervised Outlier Detection.\n\nEstimate the support of a high-dimensional distribution.\n\nThe implementation is based on libsvm.\n\nRead more in the :ref:`User Guide `.", - "docstring": "Unsupervised Outlier Detection.\n\n Estimate the support of a high-dimensional distribution.\n\n The implementation is based on libsvm.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n kernel : {'linear', 'poly', 'rbf', 'sigmoid', 'precomputed'} or callable, default='rbf'\n Specifies the kernel type to be used in the algorithm.\n If none is given, 'rbf' will be used. If a callable is given it is\n used to precompute the kernel matrix.\n\n degree : int, default=3\n Degree of the polynomial kernel function ('poly').\n Ignored by all other kernels.\n\n gamma : {'scale', 'auto'} or float, default='scale'\n Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.\n\n - if ``gamma='scale'`` (default) is passed then it uses\n 1 / (n_features * X.var()) as value of gamma,\n - if 'auto', uses 1 / n_features.\n\n .. versionchanged:: 0.22\n The default value of ``gamma`` changed from 'auto' to 'scale'.\n\n coef0 : float, default=0.0\n Independent term in kernel function.\n It is only significant in 'poly' and 'sigmoid'.\n\n tol : float, default=1e-3\n Tolerance for stopping criterion.\n\n nu : float, default=0.5\n An upper bound on the fraction of training\n errors and a lower bound of the fraction of support\n vectors. Should be in the interval (0, 1]. 
By default 0.5\n will be taken.\n\n shrinking : bool, default=True\n Whether to use the shrinking heuristic.\n See the :ref:`User Guide `.\n\n cache_size : float, default=200\n Specify the size of the kernel cache (in MB).\n\n verbose : bool, default=False\n Enable verbose output. Note that this setting takes advantage of a\n per-process runtime setting in libsvm that, if enabled, may not work\n properly in a multithreaded context.\n\n max_iter : int, default=-1\n Hard limit on iterations within solver, or -1 for no limit.\n\n Attributes\n ----------\n class_weight_ : ndarray of shape (n_classes,)\n Multipliers of parameter C for each class.\n Computed based on the ``class_weight`` parameter.\n\n .. deprecated:: 1.2\n `class_weight_` was deprecated in version 1.2 and will be removed in 1.4.\n\n coef_ : ndarray of shape (1, n_features)\n Weights assigned to the features (coefficients in the primal\n problem). This is only available in the case of a linear kernel.\n\n `coef_` is readonly property derived from `dual_coef_` and\n `support_vectors_`.\n\n dual_coef_ : ndarray of shape (1, n_SV)\n Coefficients of the support vectors in the decision function.\n\n fit_status_ : int\n 0 if correctly fitted, 1 otherwise (will raise warning)\n\n intercept_ : ndarray of shape (1,)\n Constant in the decision function.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\n Names of features seen during :term:`fit`. Defined only when `X`\n has feature names that are all strings.\n\n .. versionadded:: 1.0\n\n n_iter_ : int\n Number of iterations run by the optimization routine to fit the model.\n\n .. versionadded:: 1.1\n\n n_support_ : ndarray of shape (n_classes,), dtype=int32\n Number of support vectors for each class.\n\n offset_ : float\n Offset used to define the decision function from the raw scores.\n We have the relation: decision_function = score_samples - `offset_`.\n The offset is the opposite of `intercept_` and is provided for\n consistency with other outlier detection algorithms.\n\n .. versionadded:: 0.20\n\n shape_fit_ : tuple of int of shape (n_dimensions_of_X,)\n Array dimensions of training vector ``X``.\n\n support_ : ndarray of shape (n_SV,)\n Indices of support vectors.\n\n support_vectors_ : ndarray of shape (n_SV, n_features)\n Support vectors.\n\n See Also\n --------\n sklearn.linear_model.SGDOneClassSVM : Solves linear One-Class SVM using\n Stochastic Gradient Descent.\n sklearn.neighbors.LocalOutlierFactor : Unsupervised Outlier Detection using\n Local Outlier Factor (LOF).\n sklearn.ensemble.IsolationForest : Isolation Forest Algorithm.\n\n Examples\n --------\n >>> from sklearn.svm import OneClassSVM\n >>> X = [[0], [0.44], [0.45], [0.46], [1]]\n >>> clf = OneClassSVM(gamma='auto').fit(X)\n >>> clf.predict(X)\n array([-1, 1, 1, 1, -1])\n >>> clf.score_samples(X)\n array([1.7798..., 2.0547..., 2.0556..., 2.0561..., 1.7332...])\n " + "docstring": "Unsupervised Outlier Detection.\n\n Estimate the support of a high-dimensional distribution.\n\n The implementation is based on libsvm.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n kernel : {'linear', 'poly', 'rbf', 'sigmoid', 'precomputed'} or callable, default='rbf'\n Specifies the kernel type to be used in the algorithm.\n If none is given, 'rbf' will be used. 
If a callable is given it is\n used to precompute the kernel matrix.\n\n degree : int, default=3\n Degree of the polynomial kernel function ('poly').\n Ignored by all other kernels.\n\n gamma : {'scale', 'auto'} or float, default='scale'\n Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.\n\n - if ``gamma='scale'`` (default) is passed then it uses\n 1 / (n_features * X.var()) as value of gamma,\n - if 'auto', uses 1 / n_features.\n\n .. versionchanged:: 0.22\n The default value of ``gamma`` changed from 'auto' to 'scale'.\n\n coef0 : float, default=0.0\n Independent term in kernel function.\n It is only significant in 'poly' and 'sigmoid'.\n\n tol : float, default=1e-3\n Tolerance for stopping criterion.\n\n nu : float, default=0.5\n An upper bound on the fraction of training\n errors and a lower bound of the fraction of support\n vectors. Should be in the interval (0, 1]. By default 0.5\n will be taken.\n\n shrinking : bool, default=True\n Whether to use the shrinking heuristic.\n See the :ref:`User Guide `.\n\n cache_size : float, default=200\n Specify the size of the kernel cache (in MB).\n\n verbose : bool, default=False\n Enable verbose output. Note that this setting takes advantage of a\n per-process runtime setting in libsvm that, if enabled, may not work\n properly in a multithreaded context.\n\n max_iter : int, default=-1\n Hard limit on iterations within solver, or -1 for no limit.\n\n Attributes\n ----------\n class_weight_ : ndarray of shape (n_classes,)\n Multipliers of parameter C for each class.\n Computed based on the ``class_weight`` parameter.\n\n coef_ : ndarray of shape (1, n_features)\n Weights assigned to the features (coefficients in the primal\n problem). This is only available in the case of a linear kernel.\n\n `coef_` is readonly property derived from `dual_coef_` and\n `support_vectors_`.\n\n dual_coef_ : ndarray of shape (1, n_SV)\n Coefficients of the support vectors in the decision function.\n\n fit_status_ : int\n 0 if correctly fitted, 1 otherwise (will raise warning)\n\n intercept_ : ndarray of shape (1,)\n Constant in the decision function.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\n Names of features seen during :term:`fit`. Defined only when `X`\n has feature names that are all strings.\n\n .. versionadded:: 1.0\n\n n_iter_ : int\n Number of iterations run by the optimization routine to fit the model.\n\n .. versionadded:: 1.1\n\n n_support_ : ndarray of shape (n_classes,), dtype=int32\n Number of support vectors for each class.\n\n offset_ : float\n Offset used to define the decision function from the raw scores.\n We have the relation: decision_function = score_samples - `offset_`.\n The offset is the opposite of `intercept_` and is provided for\n consistency with other outlier detection algorithms.\n\n .. 
versionadded:: 0.20\n\n shape_fit_ : tuple of int of shape (n_dimensions_of_X,)\n Array dimensions of training vector ``X``.\n\n support_ : ndarray of shape (n_SV,)\n Indices of support vectors.\n\n support_vectors_ : ndarray of shape (n_SV, n_features)\n Support vectors.\n\n See Also\n --------\n sklearn.linear_model.SGDOneClassSVM : Solves linear One-Class SVM using\n Stochastic Gradient Descent.\n sklearn.neighbors.LocalOutlierFactor : Unsupervised Outlier Detection using\n Local Outlier Factor (LOF).\n sklearn.ensemble.IsolationForest : Isolation Forest Algorithm.\n\n Examples\n --------\n >>> from sklearn.svm import OneClassSVM\n >>> X = [[0], [0.44], [0.45], [0.46], [1]]\n >>> clf = OneClassSVM(gamma='auto').fit(X)\n >>> clf.predict(X)\n array([-1, 1, 1, 1, -1])\n >>> clf.score_samples(X)\n array([1.7798..., 2.0547..., 2.0556..., 2.0561..., 1.7332...])\n " }, { "id": "sklearn/sklearn.svm._classes/SVC", @@ -27569,14 +27425,10 @@ "qname": "sklearn.svm._classes.SVR", "decorators": [], "superclasses": ["RegressorMixin", "BaseLibSVM"], - "methods": [ - "sklearn/sklearn.svm._classes/SVR/__init__", - "sklearn/sklearn.svm._classes/SVR/class_weight_@getter", - "sklearn/sklearn.svm._classes/SVR/_more_tags" - ], + "methods": ["sklearn/sklearn.svm._classes/SVR/__init__", "sklearn/sklearn.svm._classes/SVR/_more_tags"], "is_public": true, "description": "Epsilon-Support Vector Regression.\n\nThe free parameters in the model are C and epsilon.\n\nThe implementation is based on libsvm. The fit time complexity\nis more than quadratic with the number of samples which makes it hard\nto scale to datasets with more than a couple of 10000 samples. For large\ndatasets consider using :class:`~sklearn.svm.LinearSVR` or\n:class:`~sklearn.linear_model.SGDRegressor` instead, possibly after a\n:class:`~sklearn.kernel_approximation.Nystroem` transformer.\n\nRead more in the :ref:`User Guide `.", - "docstring": "Epsilon-Support Vector Regression.\n\n The free parameters in the model are C and epsilon.\n\n The implementation is based on libsvm. The fit time complexity\n is more than quadratic with the number of samples which makes it hard\n to scale to datasets with more than a couple of 10000 samples. For large\n datasets consider using :class:`~sklearn.svm.LinearSVR` or\n :class:`~sklearn.linear_model.SGDRegressor` instead, possibly after a\n :class:`~sklearn.kernel_approximation.Nystroem` transformer.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n kernel : {'linear', 'poly', 'rbf', 'sigmoid', 'precomputed'} or callable, default='rbf'\n Specifies the kernel type to be used in the algorithm.\n If none is given, 'rbf' will be used. If a callable is given it is\n used to precompute the kernel matrix.\n\n degree : int, default=3\n Degree of the polynomial kernel function ('poly').\n Ignored by all other kernels.\n\n gamma : {'scale', 'auto'} or float, default='scale'\n Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.\n\n - if ``gamma='scale'`` (default) is passed then it uses\n 1 / (n_features * X.var()) as value of gamma,\n - if 'auto', uses 1 / n_features.\n\n .. versionchanged:: 0.22\n The default value of ``gamma`` changed from 'auto' to 'scale'.\n\n coef0 : float, default=0.0\n Independent term in kernel function.\n It is only significant in 'poly' and 'sigmoid'.\n\n tol : float, default=1e-3\n Tolerance for stopping criterion.\n\n C : float, default=1.0\n Regularization parameter. The strength of the regularization is\n inversely proportional to C. 
Must be strictly positive.\n The penalty is a squared l2 penalty.\n\n epsilon : float, default=0.1\n Epsilon in the epsilon-SVR model. It specifies the epsilon-tube\n within which no penalty is associated in the training loss function\n with points predicted within a distance epsilon from the actual\n value.\n\n shrinking : bool, default=True\n Whether to use the shrinking heuristic.\n See the :ref:`User Guide `.\n\n cache_size : float, default=200\n Specify the size of the kernel cache (in MB).\n\n verbose : bool, default=False\n Enable verbose output. Note that this setting takes advantage of a\n per-process runtime setting in libsvm that, if enabled, may not work\n properly in a multithreaded context.\n\n max_iter : int, default=-1\n Hard limit on iterations within solver, or -1 for no limit.\n\n Attributes\n ----------\n class_weight_ : ndarray of shape (n_classes,)\n Multipliers of parameter C for each class.\n Computed based on the ``class_weight`` parameter.\n\n .. deprecated:: 1.2\n `class_weight_` was deprecated in version 1.2 and will be removed in 1.4.\n\n coef_ : ndarray of shape (1, n_features)\n Weights assigned to the features (coefficients in the primal\n problem). This is only available in the case of a linear kernel.\n\n `coef_` is readonly property derived from `dual_coef_` and\n `support_vectors_`.\n\n dual_coef_ : ndarray of shape (1, n_SV)\n Coefficients of the support vector in the decision function.\n\n fit_status_ : int\n 0 if correctly fitted, 1 otherwise (will raise warning)\n\n intercept_ : ndarray of shape (1,)\n Constants in decision function.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\n Names of features seen during :term:`fit`. Defined only when `X`\n has feature names that are all strings.\n\n .. versionadded:: 1.0\n\n n_iter_ : int\n Number of iterations run by the optimization routine to fit the model.\n\n .. versionadded:: 1.1\n\n n_support_ : ndarray of shape (n_classes,), dtype=int32\n Number of support vectors for each class.\n\n shape_fit_ : tuple of int of shape (n_dimensions_of_X,)\n Array dimensions of training vector ``X``.\n\n support_ : ndarray of shape (n_SV,)\n Indices of support vectors.\n\n support_vectors_ : ndarray of shape (n_SV, n_features)\n Support vectors.\n\n See Also\n --------\n NuSVR : Support Vector Machine for regression implemented using libsvm\n using a parameter to control the number of support vectors.\n\n LinearSVR : Scalable Linear Support Vector Machine for regression\n implemented using liblinear.\n\n References\n ----------\n .. [1] `LIBSVM: A Library for Support Vector Machines\n `_\n\n .. [2] `Platt, John (1999). \"Probabilistic outputs for support vector\n machines and comparison to regularizedlikelihood methods.\"\n `_\n\n Examples\n --------\n >>> from sklearn.svm import SVR\n >>> from sklearn.pipeline import make_pipeline\n >>> from sklearn.preprocessing import StandardScaler\n >>> import numpy as np\n >>> n_samples, n_features = 10, 5\n >>> rng = np.random.RandomState(0)\n >>> y = rng.randn(n_samples)\n >>> X = rng.randn(n_samples, n_features)\n >>> regr = make_pipeline(StandardScaler(), SVR(C=1.0, epsilon=0.2))\n >>> regr.fit(X, y)\n Pipeline(steps=[('standardscaler', StandardScaler()),\n ('svr', SVR(epsilon=0.2))])\n " + "docstring": "Epsilon-Support Vector Regression.\n\n The free parameters in the model are C and epsilon.\n\n The implementation is based on libsvm. 
The fit time complexity\n is more than quadratic with the number of samples which makes it hard\n to scale to datasets with more than a couple of 10000 samples. For large\n datasets consider using :class:`~sklearn.svm.LinearSVR` or\n :class:`~sklearn.linear_model.SGDRegressor` instead, possibly after a\n :class:`~sklearn.kernel_approximation.Nystroem` transformer.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n kernel : {'linear', 'poly', 'rbf', 'sigmoid', 'precomputed'} or callable, default='rbf'\n Specifies the kernel type to be used in the algorithm.\n If none is given, 'rbf' will be used. If a callable is given it is\n used to precompute the kernel matrix.\n\n degree : int, default=3\n Degree of the polynomial kernel function ('poly').\n Ignored by all other kernels.\n\n gamma : {'scale', 'auto'} or float, default='scale'\n Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.\n\n - if ``gamma='scale'`` (default) is passed then it uses\n 1 / (n_features * X.var()) as value of gamma,\n - if 'auto', uses 1 / n_features.\n\n .. versionchanged:: 0.22\n The default value of ``gamma`` changed from 'auto' to 'scale'.\n\n coef0 : float, default=0.0\n Independent term in kernel function.\n It is only significant in 'poly' and 'sigmoid'.\n\n tol : float, default=1e-3\n Tolerance for stopping criterion.\n\n C : float, default=1.0\n Regularization parameter. The strength of the regularization is\n inversely proportional to C. Must be strictly positive.\n The penalty is a squared l2 penalty.\n\n epsilon : float, default=0.1\n Epsilon in the epsilon-SVR model. It specifies the epsilon-tube\n within which no penalty is associated in the training loss function\n with points predicted within a distance epsilon from the actual\n value.\n\n shrinking : bool, default=True\n Whether to use the shrinking heuristic.\n See the :ref:`User Guide `.\n\n cache_size : float, default=200\n Specify the size of the kernel cache (in MB).\n\n verbose : bool, default=False\n Enable verbose output. Note that this setting takes advantage of a\n per-process runtime setting in libsvm that, if enabled, may not work\n properly in a multithreaded context.\n\n max_iter : int, default=-1\n Hard limit on iterations within solver, or -1 for no limit.\n\n Attributes\n ----------\n class_weight_ : ndarray of shape (n_classes,)\n Multipliers of parameter C for each class.\n Computed based on the ``class_weight`` parameter.\n\n coef_ : ndarray of shape (1, n_features)\n Weights assigned to the features (coefficients in the primal\n problem). This is only available in the case of a linear kernel.\n\n `coef_` is readonly property derived from `dual_coef_` and\n `support_vectors_`.\n\n dual_coef_ : ndarray of shape (1, n_SV)\n Coefficients of the support vector in the decision function.\n\n fit_status_ : int\n 0 if correctly fitted, 1 otherwise (will raise warning)\n\n intercept_ : ndarray of shape (1,)\n Constants in decision function.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\n Names of features seen during :term:`fit`. Defined only when `X`\n has feature names that are all strings.\n\n .. versionadded:: 1.0\n\n n_iter_ : int\n Number of iterations run by the optimization routine to fit the model.\n\n .. 
versionadded:: 1.1\n\n n_support_ : ndarray of shape (n_classes,), dtype=int32\n Number of support vectors for each class.\n\n shape_fit_ : tuple of int of shape (n_dimensions_of_X,)\n Array dimensions of training vector ``X``.\n\n support_ : ndarray of shape (n_SV,)\n Indices of support vectors.\n\n support_vectors_ : ndarray of shape (n_SV, n_features)\n Support vectors.\n\n See Also\n --------\n NuSVR : Support Vector Machine for regression implemented using libsvm\n using a parameter to control the number of support vectors.\n\n LinearSVR : Scalable Linear Support Vector Machine for regression\n implemented using liblinear.\n\n References\n ----------\n .. [1] `LIBSVM: A Library for Support Vector Machines\n `_\n\n .. [2] `Platt, John (1999). \"Probabilistic outputs for support vector\n machines and comparison to regularizedlikelihood methods.\"\n `_\n\n Examples\n --------\n >>> from sklearn.svm import SVR\n >>> from sklearn.pipeline import make_pipeline\n >>> from sklearn.preprocessing import StandardScaler\n >>> import numpy as np\n >>> n_samples, n_features = 10, 5\n >>> rng = np.random.RandomState(0)\n >>> y = rng.randn(n_samples)\n >>> X = rng.randn(n_samples, n_features)\n >>> regr = make_pipeline(StandardScaler(), SVR(C=1.0, epsilon=0.2))\n >>> regr.fit(X, y)\n Pipeline(steps=[('standardscaler', StandardScaler()),\n ('svr', SVR(epsilon=0.2))])\n " }, { "id": "sklearn/sklearn.tree._classes/BaseDecisionTree", @@ -27902,140 +27754,6 @@ "description": "Wrap estimator which will not expose `sample_weight`.", "docstring": "Wrap estimator which will not expose `sample_weight`.\n\n Parameters\n ----------\n est : estimator, default=None\n The estimator to wrap.\n " }, - { - "id": "sklearn/sklearn.utils._param_validation/Interval", - "name": "Interval", - "qname": "sklearn.utils._param_validation.Interval", - "decorators": [], - "superclasses": ["_Constraint"], - "methods": [ - "sklearn/sklearn.utils._param_validation/Interval/__init__", - "sklearn/sklearn.utils._param_validation/Interval/_check_params", - "sklearn/sklearn.utils._param_validation/Interval/__contains__", - "sklearn/sklearn.utils._param_validation/Interval/is_satisfied_by", - "sklearn/sklearn.utils._param_validation/Interval/__str__" - ], - "is_public": false, - "description": "Constraint representing an typed interval.", - "docstring": "Constraint representing an typed interval.\n\n Parameters\n ----------\n type : {numbers.Integral, numbers.Real}\n The set of numbers in which to set the interval.\n\n left : float or int or None\n The left bound of the interval. None means left bound is -\u221e.\n\n right : float, int or None\n The right bound of the interval. None means right bound is +\u221e.\n\n closed : {\"left\", \"right\", \"both\", \"neither\"}\n Whether the interval is open or closed. Possible choices are:\n\n - `\"left\"`: the interval is closed on the left and open on the right.\n It is equivalent to the interval `[ left, right )`.\n - `\"right\"`: the interval is closed on the right and open on the left.\n It is equivalent to the interval `( left, right ]`.\n - `\"both\"`: the interval is closed.\n It is equivalent to the interval `[ left, right ]`.\n - `\"neither\"`: the interval is open.\n It is equivalent to the interval `( left, right )`.\n\n Notes\n -----\n Setting a bound to `None` and setting the interval closed is valid. 
For instance,\n strictly speaking, `Interval(Real, 0, None, closed=\"both\")` corresponds to\n `[0, +\u221e) U {+\u221e}`.\n " - }, - { - "id": "sklearn/sklearn.utils._param_validation/StrOptions", - "name": "StrOptions", - "qname": "sklearn.utils._param_validation.StrOptions", - "decorators": [], - "superclasses": ["_Constraint"], - "methods": [ - "sklearn/sklearn.utils._param_validation/StrOptions/__init__", - "sklearn/sklearn.utils._param_validation/StrOptions/is_satisfied_by", - "sklearn/sklearn.utils._param_validation/StrOptions/_mark_if_deprecated", - "sklearn/sklearn.utils._param_validation/StrOptions/__str__" - ], - "is_public": false, - "description": "Constraint representing a set of strings.", - "docstring": "Constraint representing a set of strings.\n\n Parameters\n ----------\n options : set of str\n The set of valid strings.\n\n deprecated : set of str or None, default=None\n A subset of the `options` to mark as deprecated in the repr of the constraint.\n " - }, - { - "id": "sklearn/sklearn.utils._param_validation/_ArrayLikes", - "name": "_ArrayLikes", - "qname": "sklearn.utils._param_validation._ArrayLikes", - "decorators": [], - "superclasses": ["_Constraint"], - "methods": [ - "sklearn/sklearn.utils._param_validation/_ArrayLikes/is_satisfied_by", - "sklearn/sklearn.utils._param_validation/_ArrayLikes/__str__" - ], - "is_public": false, - "description": "Constraint representing array-likes", - "docstring": "Constraint representing array-likes" - }, - { - "id": "sklearn/sklearn.utils._param_validation/_Callables", - "name": "_Callables", - "qname": "sklearn.utils._param_validation._Callables", - "decorators": [], - "superclasses": ["_Constraint"], - "methods": [ - "sklearn/sklearn.utils._param_validation/_Callables/is_satisfied_by", - "sklearn/sklearn.utils._param_validation/_Callables/__str__" - ], - "is_public": false, - "description": "Constraint representing callables.", - "docstring": "Constraint representing callables." - }, - { - "id": "sklearn/sklearn.utils._param_validation/_Constraint", - "name": "_Constraint", - "qname": "sklearn.utils._param_validation._Constraint", - "decorators": [], - "superclasses": ["ABC"], - "methods": [ - "sklearn/sklearn.utils._param_validation/_Constraint/is_satisfied_by", - "sklearn/sklearn.utils._param_validation/_Constraint/__str__" - ], - "is_public": false, - "description": "Base class for the constraint objects.", - "docstring": "Base class for the constraint objects." 
- }, - { - "id": "sklearn/sklearn.utils._param_validation/_InstancesOf", - "name": "_InstancesOf", - "qname": "sklearn.utils._param_validation._InstancesOf", - "decorators": [], - "superclasses": ["_Constraint"], - "methods": [ - "sklearn/sklearn.utils._param_validation/_InstancesOf/__init__", - "sklearn/sklearn.utils._param_validation/_InstancesOf/_type_name", - "sklearn/sklearn.utils._param_validation/_InstancesOf/is_satisfied_by", - "sklearn/sklearn.utils._param_validation/_InstancesOf/__str__" - ], - "is_public": false, - "description": "Constraint representing instances of a given type.", - "docstring": "Constraint representing instances of a given type.\n\n Parameters\n ----------\n type : type\n The valid type.\n " - }, - { - "id": "sklearn/sklearn.utils._param_validation/_NoneConstraint", - "name": "_NoneConstraint", - "qname": "sklearn.utils._param_validation._NoneConstraint", - "decorators": [], - "superclasses": ["_Constraint"], - "methods": [ - "sklearn/sklearn.utils._param_validation/_NoneConstraint/is_satisfied_by", - "sklearn/sklearn.utils._param_validation/_NoneConstraint/__str__" - ], - "is_public": false, - "description": "Constraint representing the None singleton.", - "docstring": "Constraint representing the None singleton." - }, - { - "id": "sklearn/sklearn.utils._param_validation/_RandomStates", - "name": "_RandomStates", - "qname": "sklearn.utils._param_validation._RandomStates", - "decorators": [], - "superclasses": ["_Constraint"], - "methods": [ - "sklearn/sklearn.utils._param_validation/_RandomStates/__init__", - "sklearn/sklearn.utils._param_validation/_RandomStates/is_satisfied_by", - "sklearn/sklearn.utils._param_validation/_RandomStates/__str__" - ], - "is_public": false, - "description": "Constraint representing random states.\n\nConvenience class for\n[Interval(Integral, 0, 2**32 - 1, closed=\"both\"), np.random.RandomState, None]", - "docstring": "Constraint representing random states.\n\n Convenience class for\n [Interval(Integral, 0, 2**32 - 1, closed=\"both\"), np.random.RandomState, None]\n " - }, - { - "id": "sklearn/sklearn.utils._param_validation/_SparseMatrices", - "name": "_SparseMatrices", - "qname": "sklearn.utils._param_validation._SparseMatrices", - "decorators": [], - "superclasses": ["_Constraint"], - "methods": [ - "sklearn/sklearn.utils._param_validation/_SparseMatrices/is_satisfied_by", - "sklearn/sklearn.utils._param_validation/_SparseMatrices/__str__" - ], - "is_public": false, - "description": "Constraint representing sparse matrices.", - "docstring": "Constraint representing sparse matrices." 
- }, { "id": "sklearn/sklearn.utils._pprint/KeyValTuple", "name": "KeyValTuple", @@ -28345,7 +28063,7 @@ "qname": "sklearn.__check_build.raise_build_error.e", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -28354,7 +28072,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -28619,7 +28337,7 @@ }, "type": { "kind": "EnumType", - "values": ["diagram", "text"] + "values": ["text", "diagram"] } }, { @@ -28738,7 +28456,7 @@ }, "type": { "kind": "EnumType", - "values": ["diagram", "text"] + "values": ["text", "diagram"] } }, { @@ -30098,7 +29816,7 @@ "qname": "sklearn._loss.loss.AbsoluteError.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -30111,7 +29829,7 @@ "qname": "sklearn._loss.loss.AbsoluteError.__init__.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -30120,7 +29838,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -30136,7 +29854,7 @@ "qname": "sklearn._loss.loss.AbsoluteError.fit_intercept_only.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -30149,7 +29867,7 @@ "qname": "sklearn._loss.loss.AbsoluteError.fit_intercept_only.y_true", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -30162,7 +29880,7 @@ "qname": "sklearn._loss.loss.AbsoluteError.fit_intercept_only.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -30171,7 +29889,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Compute raw_prediction of an intercept-only model.\n\nThis is the weighted median of the target, i.e. over the samples\naxis=0.", "docstring": "Compute raw_prediction of an intercept-only model.\n\n This is the weighted median of the target, i.e. 
over the samples\n axis=0.\n " }, @@ -30913,7 +30631,7 @@ }, "type": { "kind": "EnumType", - "values": ["C", "F"] + "values": ["F", "C"] } } ], @@ -31227,7 +30945,7 @@ "qname": "sklearn._loss.loss.HalfBinomialLoss.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -31240,7 +30958,7 @@ "qname": "sklearn._loss.loss.HalfBinomialLoss.__init__.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -31249,7 +30967,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -31265,7 +30983,7 @@ "qname": "sklearn._loss.loss.HalfBinomialLoss.constant_to_optimal_zero.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -31278,7 +30996,7 @@ "qname": "sklearn._loss.loss.HalfBinomialLoss.constant_to_optimal_zero.y_true", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -31291,7 +31009,7 @@ "qname": "sklearn._loss.loss.HalfBinomialLoss.constant_to_optimal_zero.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -31300,7 +31018,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -31316,7 +31034,7 @@ "qname": "sklearn._loss.loss.HalfBinomialLoss.predict_proba.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -31329,7 +31047,7 @@ "qname": "sklearn._loss.loss.HalfBinomialLoss.predict_proba.raw_prediction", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array of shape (n_samples,) or (n_samples, 1)", "description": "Raw prediction values (in link space)." 
@@ -31341,7 +31059,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict probabilities.", "docstring": "Predict probabilities.\n\n Parameters\n ----------\n raw_prediction : array of shape (n_samples,) or (n_samples, 1)\n Raw prediction values (in link space).\n\n Returns\n -------\n proba : array of shape (n_samples, 2)\n Element-wise class probabilities.\n " }, @@ -31357,7 +31075,7 @@ "qname": "sklearn._loss.loss.HalfGammaLoss.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -31370,7 +31088,7 @@ "qname": "sklearn._loss.loss.HalfGammaLoss.__init__.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -31379,7 +31097,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -31395,7 +31113,7 @@ "qname": "sklearn._loss.loss.HalfGammaLoss.constant_to_optimal_zero.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -31408,7 +31126,7 @@ "qname": "sklearn._loss.loss.HalfGammaLoss.constant_to_optimal_zero.y_true", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -31421,7 +31139,7 @@ "qname": "sklearn._loss.loss.HalfGammaLoss.constant_to_optimal_zero.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -31430,7 +31148,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -31446,7 +31164,7 @@ "qname": "sklearn._loss.loss.HalfMultinomialLoss.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -31459,7 +31177,7 @@ "qname": "sklearn._loss.loss.HalfMultinomialLoss.__init__.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -31472,7 +31190,7 @@ "qname": "sklearn._loss.loss.HalfMultinomialLoss.__init__.n_classes", "default_value": "3", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -31481,7 +31199,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -31497,7 +31215,7 @@ "qname": "sklearn._loss.loss.HalfMultinomialLoss.fit_intercept_only.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -31510,7 +31228,7 @@ "qname": "sklearn._loss.loss.HalfMultinomialLoss.fit_intercept_only.y_true", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -31523,7 +31241,7 @@ "qname": "sklearn._loss.loss.HalfMultinomialLoss.fit_intercept_only.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -31532,7 +31250,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Compute 
raw_prediction of an intercept-only model.\n\nThis is the softmax of the weighted average of the target, i.e. over\nthe samples axis=0.", "docstring": "Compute raw_prediction of an intercept-only model.\n\n This is the softmax of the weighted average of the target, i.e. over\n the samples axis=0.\n " }, @@ -31548,7 +31266,7 @@ "qname": "sklearn._loss.loss.HalfMultinomialLoss.gradient_proba.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -31561,7 +31279,7 @@ "qname": "sklearn._loss.loss.HalfMultinomialLoss.gradient_proba.y_true", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "C-contiguous array of shape (n_samples,)", "description": "Observed, true target values." @@ -31577,7 +31295,7 @@ "qname": "sklearn._loss.loss.HalfMultinomialLoss.gradient_proba.raw_prediction", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array of shape (n_samples, n_classes)", "description": "Raw prediction values (in link space)." @@ -31593,7 +31311,7 @@ "qname": "sklearn._loss.loss.HalfMultinomialLoss.gradient_proba.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "None or C-contiguous array of shape (n_samples,)", "description": "Sample weights." @@ -31618,7 +31336,7 @@ "qname": "sklearn._loss.loss.HalfMultinomialLoss.gradient_proba.gradient_out", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "None or array of shape (n_samples, n_classes)", "description": "A location into which the gradient is stored. If None, a new array\nmight be created." @@ -31643,7 +31361,7 @@ "qname": "sklearn._loss.loss.HalfMultinomialLoss.gradient_proba.proba_out", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "None or array of shape (n_samples, n_classes)", "description": "A location into which the class probabilities are stored. If None,\na new array might be created." @@ -31668,7 +31386,7 @@ "qname": "sklearn._loss.loss.HalfMultinomialLoss.gradient_proba.n_threads", "default_value": "1", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=1", "description": "Might use openmp thread parallelism." @@ -31680,7 +31398,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Compute gradient and class probabilities fow raw_prediction.", "docstring": "Compute gradient and class probabilities fow raw_prediction.\n\n Parameters\n ----------\n y_true : C-contiguous array of shape (n_samples,)\n Observed, true target values.\n raw_prediction : array of shape (n_samples, n_classes)\n Raw prediction values (in link space).\n sample_weight : None or C-contiguous array of shape (n_samples,)\n Sample weights.\n gradient_out : None or array of shape (n_samples, n_classes)\n A location into which the gradient is stored. If None, a new array\n might be created.\n proba_out : None or array of shape (n_samples, n_classes)\n A location into which the class probabilities are stored. 
If None,\n a new array might be created.\n n_threads : int, default=1\n Might use openmp thread parallelism.\n\n Returns\n -------\n gradient : array of shape (n_samples, n_classes)\n Element-wise gradients.\n\n proba : array of shape (n_samples, n_classes)\n Element-wise class probabilities.\n " }, @@ -31696,7 +31414,7 @@ "qname": "sklearn._loss.loss.HalfMultinomialLoss.in_y_true_range.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -31709,7 +31427,7 @@ "qname": "sklearn._loss.loss.HalfMultinomialLoss.in_y_true_range.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray", "description": "" @@ -31721,7 +31439,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Return True if y is in the valid range of y_true.", "docstring": "Return True if y is in the valid range of y_true.\n\n Parameters\n ----------\n y : ndarray\n " }, @@ -31737,7 +31455,7 @@ "qname": "sklearn._loss.loss.HalfMultinomialLoss.predict_proba.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -31750,7 +31468,7 @@ "qname": "sklearn._loss.loss.HalfMultinomialLoss.predict_proba.raw_prediction", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array of shape (n_samples, n_classes)", "description": "Raw prediction values (in link space)." @@ -31762,7 +31480,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict probabilities.", "docstring": "Predict probabilities.\n\n Parameters\n ----------\n raw_prediction : array of shape (n_samples, n_classes)\n Raw prediction values (in link space).\n\n Returns\n -------\n proba : array of shape (n_samples, n_classes)\n Element-wise class probabilities.\n " }, @@ -31778,7 +31496,7 @@ "qname": "sklearn._loss.loss.HalfPoissonLoss.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -31791,7 +31509,7 @@ "qname": "sklearn._loss.loss.HalfPoissonLoss.__init__.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -31800,7 +31518,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -31816,7 +31534,7 @@ "qname": "sklearn._loss.loss.HalfPoissonLoss.constant_to_optimal_zero.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -31829,7 +31547,7 @@ "qname": "sklearn._loss.loss.HalfPoissonLoss.constant_to_optimal_zero.y_true", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -31842,7 +31560,7 @@ "qname": "sklearn._loss.loss.HalfPoissonLoss.constant_to_optimal_zero.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -31851,7 +31569,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -31867,7 +31585,7 @@ "qname": 
"sklearn._loss.loss.HalfSquaredError.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -31880,7 +31598,7 @@ "qname": "sklearn._loss.loss.HalfSquaredError.__init__.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -31889,7 +31607,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -31905,7 +31623,7 @@ "qname": "sklearn._loss.loss.HalfTweedieLoss.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -31918,7 +31636,7 @@ "qname": "sklearn._loss.loss.HalfTweedieLoss.__init__.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -31931,7 +31649,7 @@ "qname": "sklearn._loss.loss.HalfTweedieLoss.__init__.power", "default_value": "1.5", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -31940,7 +31658,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -31956,7 +31674,7 @@ "qname": "sklearn._loss.loss.HalfTweedieLoss.constant_to_optimal_zero.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -31969,7 +31687,7 @@ "qname": "sklearn._loss.loss.HalfTweedieLoss.constant_to_optimal_zero.y_true", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -31982,7 +31700,7 @@ "qname": "sklearn._loss.loss.HalfTweedieLoss.constant_to_optimal_zero.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -31991,7 +31709,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -32007,7 +31725,7 @@ "qname": "sklearn._loss.loss.HalfTweedieLossIdentity.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -32020,7 +31738,7 @@ "qname": "sklearn._loss.loss.HalfTweedieLossIdentity.__init__.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -32033,7 +31751,7 @@ "qname": "sklearn._loss.loss.HalfTweedieLossIdentity.__init__.power", "default_value": "1.5", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -32042,7 +31760,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -32058,7 +31776,7 @@ "qname": "sklearn._loss.loss.PinballLoss.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -32071,7 +31789,7 @@ "qname": "sklearn._loss.loss.PinballLoss.__init__.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "", 
"description": "" @@ -32084,7 +31802,7 @@ "qname": "sklearn._loss.loss.PinballLoss.__init__.quantile", "default_value": "0.5", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -32093,7 +31811,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -32109,7 +31827,7 @@ "qname": "sklearn._loss.loss.PinballLoss.fit_intercept_only.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -32122,7 +31840,7 @@ "qname": "sklearn._loss.loss.PinballLoss.fit_intercept_only.y_true", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -32135,7 +31853,7 @@ "qname": "sklearn._loss.loss.PinballLoss.fit_intercept_only.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -32144,7 +31862,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Compute raw_prediction of an intercept-only model.\n\nThis is the weighted median of the target, i.e. over the samples\naxis=0.", "docstring": "Compute raw_prediction of an intercept-only model.\n\n This is the weighted median of the target, i.e. over the samples\n axis=0.\n " }, @@ -32676,31 +32394,6 @@ "description": "Validate input data and set or check the `n_features_in_` attribute.", "docstring": "Validate input data and set or check the `n_features_in_` attribute.\n\n Parameters\n ----------\n X : {array-like, sparse matrix, dataframe} of shape (n_samples, n_features), default='no validation'\n The input samples.\n If `'no_validation'`, no validation is performed on `X`. This is\n useful for meta-estimator which can delegate input validation to\n their underlying estimator(s). In that case `y` must be passed and\n the only accepted `check_params` are `multi_output` and\n `y_numeric`.\n\n y : array-like of shape (n_samples,), default='no_validation'\n The targets.\n\n - If `None`, `check_array` is called on `X`. If the estimator's\n requires_y tag is True, then an error will be raised.\n - If `'no_validation'`, `check_array` is called on `X` and the\n estimator's requires_y tag is ignored. This is a default\n placeholder and is never meant to be explicitly set. In that case\n `X` must be passed.\n - Otherwise, only `y` with `_check_y` or both `X` and `y` are\n checked with either `check_array` or `check_X_y` depending on\n `validate_separately`.\n\n reset : bool, default=True\n Whether to reset the `n_features_in_` attribute.\n If False, the input will be checked for consistency with data\n provided when reset was last True.\n .. note::\n It is recommended to call reset=True in `fit` and in the first\n call to `partial_fit`. All other methods that validate `X`\n should set `reset=False`.\n\n validate_separately : False or tuple of dicts, default=False\n Only used if y is not None.\n If False, call validate_X_y(). Else, it must be a tuple of kwargs\n to be used for calling check_array() on X and y respectively.\n\n `estimator=self` is automatically added to these dicts to generate\n more informative error message in case of invalid input data.\n\n **check_params : kwargs\n Parameters passed to :func:`sklearn.utils.check_array` or\n :func:`sklearn.utils.check_X_y`. 
Ignored if validate_separately\n is not False.\n\n `estimator=self` is automatically added to these params to generate\n more informative error message in case of invalid input data.\n\n Returns\n -------\n out : {ndarray, sparse matrix} or tuple of these\n The validated input. A tuple is returned if both `X` and `y` are\n validated.\n " }, - { - "id": "sklearn/sklearn.base/BaseEstimator/_validate_params", - "name": "_validate_params", - "qname": "sklearn.base.BaseEstimator._validate_params", - "decorators": [], - "parameters": [ - { - "id": "sklearn/sklearn.base/BaseEstimator/_validate_params/self", - "name": "self", - "qname": "sklearn.base.BaseEstimator._validate_params.self", - "default_value": null, - "assigned_by": "IMPLICIT", - "is_public": false, - "docstring": { - "type": "", - "description": "" - }, - "type": {} - } - ], - "results": [], - "is_public": false, - "description": "Validate types and values of constructor parameters\n\nThe expected type and values must be defined in the `_parameter_constraints`\nclass attribute, which is a dictionary `param_name: list of constraints`. See\nthe docstring of `validate_parameter_constraints` for a description of the\naccepted constraints.", - "docstring": "Validate types and values of constructor parameters\n\n The expected type and values must be defined in the `_parameter_constraints`\n class attribute, which is a dictionary `param_name: list of constraints`. See\n the docstring of `validate_parameter_constraints` for a description of the\n accepted constraints.\n " - }, { "id": "sklearn/sklearn.base/BaseEstimator/get_params", "name": "get_params", @@ -35466,7 +35159,7 @@ "qname": "sklearn.cluster._affinity_propagation.AffinityPropagation.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -35479,7 +35172,7 @@ "qname": "sklearn.cluster._affinity_propagation.AffinityPropagation.__init__.damping", "default_value": "0.5", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.5", "description": "Damping factor in the range `[0.5, 1.0)` is the extent to\nwhich the current value is maintained relative to\nincoming values (weighted 1 - damping). This in order\nto avoid numerical oscillations when updating these\nvalues (messages)." @@ -35508,7 +35201,7 @@ "qname": "sklearn.cluster._affinity_propagation.AffinityPropagation.__init__.max_iter", "default_value": "200", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=200", "description": "Maximum number of iterations." @@ -35524,7 +35217,7 @@ "qname": "sklearn.cluster._affinity_propagation.AffinityPropagation.__init__.convergence_iter", "default_value": "15", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=15", "description": "Number of iterations with no change in the number\nof estimated clusters that stops the convergence." @@ -35540,7 +35233,7 @@ "qname": "sklearn.cluster._affinity_propagation.AffinityPropagation.__init__.copy", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Make a copy of input data." 
@@ -35556,7 +35249,7 @@ "qname": "sklearn.cluster._affinity_propagation.AffinityPropagation.__init__.preference", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,) or float, default=None", "description": "Preferences for each point - points with larger values of\npreferences are more likely to be chosen as exemplars. The number\nof exemplars, ie of clusters, is influenced by the input\npreferences value. If the preferences are not passed as arguments,\nthey will be set to the median of the input similarities." @@ -35581,7 +35274,7 @@ "qname": "sklearn.cluster._affinity_propagation.AffinityPropagation.__init__.affinity", "default_value": "'euclidean'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'euclidean', 'precomputed'}, default='euclidean'", "description": "Which affinity to use. At the moment 'precomputed' and\n``euclidean`` are supported. 'euclidean' uses the\nnegative squared euclidean distance between points." @@ -35597,7 +35290,7 @@ "qname": "sklearn.cluster._affinity_propagation.AffinityPropagation.__init__.verbose", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Whether to be verbose." @@ -35613,7 +35306,7 @@ "qname": "sklearn.cluster._affinity_propagation.AffinityPropagation.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Pseudo-random number generator to control the starting state.\nUse an int for reproducible results across function calls.\nSee the :term:`Glossary `.\n\n.. versionadded:: 0.23\n this parameter was previously hardcoded as 0." @@ -35638,7 +35331,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -35679,7 +35372,7 @@ "qname": "sklearn.cluster._affinity_propagation.AffinityPropagation.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -35692,7 +35385,7 @@ "qname": "sklearn.cluster._affinity_propagation.AffinityPropagation.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features), or array-like of shape (n_samples, n_samples)", "description": "Training instances to cluster, or similarities / affinities between\ninstances if ``affinity='precomputed'``. If a sparse feature matrix\nis provided, it will be converted into a sparse ``csr_matrix``." @@ -35721,7 +35414,7 @@ "qname": "sklearn.cluster._affinity_propagation.AffinityPropagation.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present here for API consistency by convention." 
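The AffinityPropagation entries above record the constructor defaults this annotation file tracks (damping=0.5, max_iter=200, convergence_iter=15, preference=None, affinity='euclidean', verbose=False, random_state=None). As a minimal sketch of what those recorded defaults look like in use — the toy data, variable names, and printed attributes are illustrative and not part of the annotation data, and the snippet assumes a scikit-learn release compatible with this snapshot:

import numpy as np
from sklearn.cluster import AffinityPropagation

# Toy 2-D data with two well-separated groups (illustrative only).
rng = np.random.RandomState(0)
X = np.vstack([rng.normal(0, 0.3, size=(20, 2)),
               rng.normal(5, 0.3, size=(20, 2))])

# Spelling out the defaults recorded in the annotations above; the affinity
# default in this snapshot is 'euclidean' (negative squared euclidean distance).
ap = AffinityPropagation(
    damping=0.5,           # extent to which current messages are kept
    max_iter=200,          # maximum number of message-passing iterations
    convergence_iter=15,   # stable iterations needed to declare convergence
    preference=None,       # None -> median of the input similarities
    random_state=None,     # controls the starting state
).fit(X)

print(ap.cluster_centers_indices_)  # indices of the chosen exemplars
print(ap.labels_[:5])               # cluster labels of the first samples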
@@ -35733,7 +35426,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the clustering from features, or affinity matrix.", "docstring": "Fit the clustering from features, or affinity matrix.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features), or array-like of shape (n_samples, n_samples)\n Training instances to cluster, or similarities / affinities between\n instances if ``affinity='precomputed'``. If a sparse feature matrix\n is provided, it will be converted into a sparse ``csr_matrix``.\n\n y : Ignored\n Not used, present here for API consistency by convention.\n\n Returns\n -------\n self\n Returns the instance itself.\n " }, @@ -35749,7 +35442,7 @@ "qname": "sklearn.cluster._affinity_propagation.AffinityPropagation.fit_predict.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -35762,7 +35455,7 @@ "qname": "sklearn.cluster._affinity_propagation.AffinityPropagation.fit_predict.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features), or array-like of shape (n_samples, n_samples)", "description": "Training instances to cluster, or similarities / affinities between\ninstances if ``affinity='precomputed'``. If a sparse feature matrix\nis provided, it will be converted into a sparse ``csr_matrix``." @@ -35791,7 +35484,7 @@ "qname": "sklearn.cluster._affinity_propagation.AffinityPropagation.fit_predict.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present here for API consistency by convention." @@ -35803,7 +35496,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit clustering from features/affinity matrix; return cluster labels.", "docstring": "Fit clustering from features/affinity matrix; return cluster labels.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features), or array-like of shape (n_samples, n_samples)\n Training instances to cluster, or similarities / affinities between\n instances if ``affinity='precomputed'``. If a sparse feature matrix\n is provided, it will be converted into a sparse ``csr_matrix``.\n\n y : Ignored\n Not used, present here for API consistency by convention.\n\n Returns\n -------\n labels : ndarray of shape (n_samples,)\n Cluster labels.\n " }, @@ -35819,7 +35512,7 @@ "qname": "sklearn.cluster._affinity_propagation.AffinityPropagation.predict.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -35832,7 +35525,7 @@ "qname": "sklearn.cluster._affinity_propagation.AffinityPropagation.predict.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "New data to predict. If a sparse matrix is provided, it will be\nconverted into a sparse ``csr_matrix``." 
@@ -35853,7 +35546,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict the closest cluster each sample in X belongs to.", "docstring": "Predict the closest cluster each sample in X belongs to.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n New data to predict. If a sparse matrix is provided, it will be\n converted into a sparse ``csr_matrix``.\n\n Returns\n -------\n labels : ndarray of shape (n_samples,)\n Cluster labels.\n " }, @@ -36085,7 +35778,7 @@ "qname": "sklearn.cluster._agglomerative.AgglomerativeClustering.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -36098,7 +35791,7 @@ "qname": "sklearn.cluster._agglomerative.AgglomerativeClustering.__init__.n_clusters", "default_value": "2", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int or None, default=2", "description": "The number of clusters to find. It must be ``None`` if\n``distance_threshold`` is not ``None``." @@ -36123,7 +35816,7 @@ "qname": "sklearn.cluster._agglomerative.AgglomerativeClustering.__init__.affinity", "default_value": "'euclidean'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str or callable, default='euclidean'", "description": "Metric used to compute the linkage. Can be \"euclidean\", \"l1\", \"l2\",\n\"manhattan\", \"cosine\", or \"precomputed\".\nIf linkage is \"ward\", only \"euclidean\" is accepted.\nIf \"precomputed\", a distance matrix (instead of a similarity matrix)\nis needed as input for the fit method." @@ -36148,7 +35841,7 @@ "qname": "sklearn.cluster._agglomerative.AgglomerativeClustering.__init__.memory", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str or object with the joblib.Memory interface, default=None", "description": "Used to cache the output of the computation of the tree.\nBy default, no caching is done. If a string is given, it is the\npath to the caching directory." @@ -36173,7 +35866,7 @@ "qname": "sklearn.cluster._agglomerative.AgglomerativeClustering.__init__.connectivity", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like or callable, default=None", "description": "Connectivity matrix. Defines for each sample the neighboring\nsamples following a given structure of the data.\nThis can be a connectivity matrix itself or a callable that transforms\nthe data into a connectivity matrix, such as derived from\n`kneighbors_graph`. Default is ``None``, i.e, the\nhierarchical clustering algorithm is unstructured." @@ -36198,7 +35891,7 @@ "qname": "sklearn.cluster._agglomerative.AgglomerativeClustering.__init__.compute_full_tree", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "'auto' or bool, default='auto'", "description": "Stop early the construction of the tree at ``n_clusters``. This is\nuseful to decrease computation time if the number of clusters is not\nsmall compared to the number of samples. This option is useful only\nwhen specifying a connectivity matrix. Note also that when varying the\nnumber of clusters and using caching, it may be advantageous to compute\nthe full tree. It must be ``True`` if ``distance_threshold`` is not\n``None``. 
By default `compute_full_tree` is \"auto\", which is equivalent\nto `True` when `distance_threshold` is not `None` or that `n_clusters`\nis inferior to the maximum between 100 or `0.02 * n_samples`.\nOtherwise, \"auto\" is equivalent to `False`." @@ -36223,14 +35916,14 @@ "qname": "sklearn.cluster._agglomerative.AgglomerativeClustering.__init__.linkage", "default_value": "'ward'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'ward', 'complete', 'average', 'single'}, default='ward'", "description": "Which linkage criterion to use. The linkage criterion determines which\ndistance to use between sets of observation. The algorithm will merge\nthe pairs of cluster that minimize this criterion.\n\n- 'ward' minimizes the variance of the clusters being merged.\n- 'average' uses the average of the distances of each observation of\n the two sets.\n- 'complete' or 'maximum' linkage uses the maximum distances between\n all observations of the two sets.\n- 'single' uses the minimum of the distances between all observations\n of the two sets.\n\n.. versionadded:: 0.20\n Added the 'single' option" }, "type": { "kind": "EnumType", - "values": ["ward", "average", "complete", "single"] + "values": ["average", "ward", "single", "complete"] } }, { @@ -36239,7 +35932,7 @@ "qname": "sklearn.cluster._agglomerative.AgglomerativeClustering.__init__.distance_threshold", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=None", "description": "The linkage distance threshold above which, clusters will not be\nmerged. If not ``None``, ``n_clusters`` must be ``None`` and\n``compute_full_tree`` must be ``True``.\n\n.. versionadded:: 0.21" @@ -36255,7 +35948,7 @@ "qname": "sklearn.cluster._agglomerative.AgglomerativeClustering.__init__.compute_distances", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Computes distances between clusters even if `distance_threshold` is not\nused. This can be used to make dendrogram visualization, but introduces\na computational and memory overhead.\n\n.. versionadded:: 0.24" @@ -36267,7 +35960,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -36324,7 +36017,7 @@ "qname": "sklearn.cluster._agglomerative.AgglomerativeClustering.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -36337,7 +36030,7 @@ "qname": "sklearn.cluster._agglomerative.AgglomerativeClustering.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like, shape (n_samples, n_features) or (n_samples, n_samples)", "description": "Training instances to cluster, or distances between instances if\n``affinity='precomputed'``." @@ -36362,7 +36055,7 @@ "qname": "sklearn.cluster._agglomerative.AgglomerativeClustering.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present here for API consistency by convention." 
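The AgglomerativeClustering entries above record n_clusters=2, linkage='ward', distance_threshold=None and compute_distances=False as defaults, and the docstrings note that n_clusters must be None when distance_threshold is set (with compute_full_tree resolving to True in that case). A minimal sketch under those constraints — the toy data and variable names are illustrative, not part of the annotation data:

import numpy as np
from sklearn.cluster import AgglomerativeClustering

rng = np.random.RandomState(0)
X = rng.rand(30, 2)  # illustrative data only

# Cut the tree by distance instead of a fixed cluster count: per the
# docstrings above this requires n_clusters=None, and the full tree is built.
model = AgglomerativeClustering(
    n_clusters=None,
    distance_threshold=0.5,
    linkage="ward",          # default linkage recorded above
    compute_distances=True,  # keep merge distances, e.g. for a dendrogram
).fit(X)

print(model.n_clusters_)     # number of clusters found below the threshold
print(model.distances_[:5])  # merge distances exposed by compute_distances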
@@ -36374,7 +36067,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the hierarchical clustering from features, or distance matrix.", "docstring": "Fit the hierarchical clustering from features, or distance matrix.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features) or (n_samples, n_samples)\n Training instances to cluster, or distances between instances if\n ``affinity='precomputed'``.\n\n y : Ignored\n Not used, present here for API consistency by convention.\n\n Returns\n -------\n self : object\n Returns the fitted instance.\n " }, @@ -36390,7 +36083,7 @@ "qname": "sklearn.cluster._agglomerative.AgglomerativeClustering.fit_predict.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -36403,7 +36096,7 @@ "qname": "sklearn.cluster._agglomerative.AgglomerativeClustering.fit_predict.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features) or (n_samples, n_samples)", "description": "Training instances to cluster, or distances between instances if\n``affinity='precomputed'``." @@ -36419,7 +36112,7 @@ "qname": "sklearn.cluster._agglomerative.AgglomerativeClustering.fit_predict.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present here for API consistency by convention." @@ -36431,7 +36124,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit and return the result of each sample's clustering assignment.\n\nIn addition to fitting, this method also return the result of the\nclustering assignment for each sample in the training set.", "docstring": "Fit and return the result of each sample's clustering assignment.\n\n In addition to fitting, this method also return the result of the\n clustering assignment for each sample in the training set.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features) or (n_samples, n_samples)\n Training instances to cluster, or distances between instances if\n ``affinity='precomputed'``.\n\n y : Ignored\n Not used, present here for API consistency by convention.\n\n Returns\n -------\n labels : ndarray of shape (n_samples,)\n Cluster labels.\n " }, @@ -36447,7 +36140,7 @@ "qname": "sklearn.cluster._agglomerative.FeatureAgglomeration.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -36460,7 +36153,7 @@ "qname": "sklearn.cluster._agglomerative.FeatureAgglomeration.__init__.n_clusters", "default_value": "2", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=2", "description": "The number of clusters to find. It must be ``None`` if\n``distance_threshold`` is not ``None``." @@ -36476,7 +36169,7 @@ "qname": "sklearn.cluster._agglomerative.FeatureAgglomeration.__init__.affinity", "default_value": "'euclidean'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str or callable, default='euclidean'", "description": "Metric used to compute the linkage. Can be \"euclidean\", \"l1\", \"l2\",\n\"manhattan\", \"cosine\", or 'precomputed'.\nIf linkage is \"ward\", only \"euclidean\" is accepted." 
@@ -36501,7 +36194,7 @@ "qname": "sklearn.cluster._agglomerative.FeatureAgglomeration.__init__.memory", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str or object with the joblib.Memory interface, default=None", "description": "Used to cache the output of the computation of the tree.\nBy default, no caching is done. If a string is given, it is the\npath to the caching directory." @@ -36526,7 +36219,7 @@ "qname": "sklearn.cluster._agglomerative.FeatureAgglomeration.__init__.connectivity", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like or callable, default=None", "description": "Connectivity matrix. Defines for each feature the neighboring\nfeatures following a given structure of the data.\nThis can be a connectivity matrix itself or a callable that transforms\nthe data into a connectivity matrix, such as derived from\n`kneighbors_graph`. Default is `None`, i.e, the\nhierarchical clustering algorithm is unstructured." @@ -36551,7 +36244,7 @@ "qname": "sklearn.cluster._agglomerative.FeatureAgglomeration.__init__.compute_full_tree", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "'auto' or bool, default='auto'", "description": "Stop early the construction of the tree at `n_clusters`. This is useful\nto decrease computation time if the number of clusters is not small\ncompared to the number of features. This option is useful only when\nspecifying a connectivity matrix. Note also that when varying the\nnumber of clusters and using caching, it may be advantageous to compute\nthe full tree. It must be ``True`` if ``distance_threshold`` is not\n``None``. By default `compute_full_tree` is \"auto\", which is equivalent\nto `True` when `distance_threshold` is not `None` or that `n_clusters`\nis inferior to the maximum between 100 or `0.02 * n_samples`.\nOtherwise, \"auto\" is equivalent to `False`." @@ -36576,14 +36269,14 @@ "qname": "sklearn.cluster._agglomerative.FeatureAgglomeration.__init__.linkage", "default_value": "'ward'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{\"ward\", \"complete\", \"average\", \"single\"}, default=\"ward\"", "description": "Which linkage criterion to use. The linkage criterion determines which\ndistance to use between sets of features. The algorithm will merge\nthe pairs of cluster that minimize this criterion.\n\n- \"ward\" minimizes the variance of the clusters being merged.\n- \"complete\" or maximum linkage uses the maximum distances between\n all features of the two sets.\n- \"average\" uses the average of the distances of each feature of\n the two sets.\n- \"single\" uses the minimum of the distances between all features\n of the two sets." }, "type": { "kind": "EnumType", - "values": ["ward", "average", "complete", "single"] + "values": ["average", "ward", "single", "complete"] } }, { @@ -36592,7 +36285,7 @@ "qname": "sklearn.cluster._agglomerative.FeatureAgglomeration.__init__.pooling_func", "default_value": "np.mean", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "callable, default=np.mean", "description": "This combines the values of agglomerated features into a single\nvalue, and should accept an array of shape [M, N] and the keyword\nargument `axis=1`, and reduce it to an array of size [M]." 
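The FeatureAgglomeration entries above mirror the clustering parameters but apply them to features, with pooling_func defaulting to np.mean to combine each agglomerated feature group into a single column. A short sketch of that transform — data shapes and names are illustrative only:

import numpy as np
from sklearn.cluster import FeatureAgglomeration

rng = np.random.RandomState(0)
X = rng.rand(50, 8)  # 50 samples, 8 illustrative features

# Merge the 8 features into 3 groups; each group is pooled with np.mean,
# the default pooling_func recorded above.
agglo = FeatureAgglomeration(n_clusters=3, pooling_func=np.mean)
X_reduced = agglo.fit_transform(X)

print(X_reduced.shape)  # (50, 3): one pooled column per feature cluster
print(agglo.labels_)    # feature-to-cluster assignment for the 8 input features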
@@ -36608,7 +36301,7 @@ "qname": "sklearn.cluster._agglomerative.FeatureAgglomeration.__init__.distance_threshold", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=None", "description": "The linkage distance threshold above which, clusters will not be\nmerged. If not ``None``, ``n_clusters`` must be ``None`` and\n``compute_full_tree`` must be ``True``.\n\n.. versionadded:: 0.21" @@ -36624,7 +36317,7 @@ "qname": "sklearn.cluster._agglomerative.FeatureAgglomeration.__init__.compute_distances", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Computes distances between clusters even if `distance_threshold` is not\nused. This can be used to make dendrogram visualization, but introduces\na computational and memory overhead.\n\n.. versionadded:: 0.24" @@ -36636,7 +36329,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -36652,7 +36345,7 @@ "qname": "sklearn.cluster._agglomerative.FeatureAgglomeration.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -36665,7 +36358,7 @@ "qname": "sklearn.cluster._agglomerative.FeatureAgglomeration.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "The data." @@ -36681,7 +36374,7 @@ "qname": "sklearn.cluster._agglomerative.FeatureAgglomeration.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present here for API consistency by convention." @@ -36693,7 +36386,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the hierarchical clustering on the data.", "docstring": "Fit the hierarchical clustering on the data.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The data.\n\n y : Ignored\n Not used, present here for API consistency by convention.\n\n Returns\n -------\n self : object\n Returns the transformer.\n " }, @@ -36709,7 +36402,7 @@ "qname": "sklearn.cluster._agglomerative.FeatureAgglomeration.fit_predict.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -36718,7 +36411,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit and return the result of each sample's clustering assignment.", "docstring": "Fit and return the result of each sample's clustering assignment." 
}, @@ -37041,7 +36734,7 @@ }, "type": { "kind": "EnumType", - "values": ["average", "complete", "single"] + "values": ["average", "single", "complete"] } }, { @@ -37530,7 +37223,7 @@ "qname": "sklearn.cluster._bicluster.SpectralBiclustering.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -37543,7 +37236,7 @@ "qname": "sklearn.cluster._bicluster.SpectralBiclustering.__init__.n_clusters", "default_value": "3", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int or tuple (n_row_clusters, n_column_clusters), default=3", "description": "The number of row and column clusters in the checkerboard\nstructure." @@ -37568,14 +37261,14 @@ "qname": "sklearn.cluster._bicluster.SpectralBiclustering.__init__.method", "default_value": "'bistochastic'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'bistochastic', 'scale', 'log'}, default='bistochastic'", "description": "Method of normalizing and converting singular vectors into\nbiclusters. May be one of 'scale', 'bistochastic', or 'log'.\nThe authors recommend using 'log'. If the data is sparse,\nhowever, log normalization will not work, which is why the\ndefault is 'bistochastic'.\n\n.. warning::\n if `method='log'`, the data must be sparse." }, "type": { "kind": "EnumType", - "values": ["scale", "log", "bistochastic"] + "values": ["scale", "bistochastic", "log"] } }, { @@ -37584,7 +37277,7 @@ "qname": "sklearn.cluster._bicluster.SpectralBiclustering.__init__.n_components", "default_value": "6", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=6", "description": "Number of singular vectors to check." @@ -37600,7 +37293,7 @@ "qname": "sklearn.cluster._bicluster.SpectralBiclustering.__init__.n_best", "default_value": "3", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=3", "description": "Number of best singular vectors to which to project the data\nfor clustering." @@ -37616,7 +37309,7 @@ "qname": "sklearn.cluster._bicluster.SpectralBiclustering.__init__.svd_method", "default_value": "'randomized'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'randomized', 'arpack'}, default='randomized'", "description": "Selects the algorithm for finding singular vectors. May be\n'randomized' or 'arpack'. If 'randomized', uses\n:func:`~sklearn.utils.extmath.randomized_svd`, which may be faster\nfor large matrices. If 'arpack', uses\n`scipy.sparse.linalg.svds`, which is more accurate, but\npossibly slower in some cases." @@ -37632,7 +37325,7 @@ "qname": "sklearn.cluster._bicluster.SpectralBiclustering.__init__.n_svd_vecs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Number of vectors to use in calculating the SVD. Corresponds\nto `ncv` when `svd_method=arpack` and `n_oversamples` when\n`svd_method` is 'randomized`." @@ -37648,7 +37341,7 @@ "qname": "sklearn.cluster._bicluster.SpectralBiclustering.__init__.mini_batch", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Whether to use mini-batch k-means, which is faster but may get\ndifferent results." 
@@ -37664,7 +37357,7 @@ "qname": "sklearn.cluster._bicluster.SpectralBiclustering.__init__.init", "default_value": "'k-means++'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'k-means++', 'random'} or ndarray of (n_clusters, n_features), default='k-means++'", "description": "Method for initialization of k-means algorithm; defaults to\n'k-means++'." @@ -37689,7 +37382,7 @@ "qname": "sklearn.cluster._bicluster.SpectralBiclustering.__init__.n_init", "default_value": "10", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=10", "description": "Number of random initializations that are tried with the\nk-means algorithm.\n\nIf mini-batch k-means is used, the best initialization is\nchosen and the algorithm runs once. Otherwise, the algorithm\nis run for each initialization and the best solution chosen." @@ -37705,7 +37398,7 @@ "qname": "sklearn.cluster._bicluster.SpectralBiclustering.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance, default=None", "description": "Used for randomizing the singular value decomposition and the k-means\ninitialization. Use an int to make the randomness deterministic.\nSee :term:`Glossary `." @@ -37726,7 +37419,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -37946,7 +37639,7 @@ "qname": "sklearn.cluster._bicluster.SpectralCoclustering.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -37959,7 +37652,7 @@ "qname": "sklearn.cluster._bicluster.SpectralCoclustering.__init__.n_clusters", "default_value": "3", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=3", "description": "The number of biclusters to find." @@ -37975,7 +37668,7 @@ "qname": "sklearn.cluster._bicluster.SpectralCoclustering.__init__.svd_method", "default_value": "'randomized'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'randomized', 'arpack'}, default='randomized'", "description": "Selects the algorithm for finding singular vectors. May be\n'randomized' or 'arpack'. If 'randomized', use\n:func:`sklearn.utils.extmath.randomized_svd`, which may be faster\nfor large matrices. If 'arpack', use\n:func:`scipy.sparse.linalg.svds`, which is more accurate, but\npossibly slower in some cases." @@ -37991,7 +37684,7 @@ "qname": "sklearn.cluster._bicluster.SpectralCoclustering.__init__.n_svd_vecs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Number of vectors to use in calculating the SVD. Corresponds\nto `ncv` when `svd_method=arpack` and `n_oversamples` when\n`svd_method` is 'randomized`." @@ -38007,7 +37700,7 @@ "qname": "sklearn.cluster._bicluster.SpectralCoclustering.__init__.mini_batch", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Whether to use mini-batch k-means, which is faster but may get\ndifferent results." 
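The SpectralBiclustering entries above record method='bistochastic', svd_method='randomized', n_init=10 and an n_clusters value that may be a (n_row_clusters, n_column_clusters) tuple for checkerboard structure. A small sketch of that usage on synthetic checkerboard data — the generated data and printed attributes are illustrative, not part of the annotation file:

import numpy as np
from sklearn.cluster import SpectralBiclustering
from sklearn.datasets import make_checkerboard

# Synthetic data with the (n_row_clusters, n_column_clusters) structure the
# n_clusters docstring above describes (illustrative only).
data, rows, cols = make_checkerboard(
    shape=(120, 120), n_clusters=(3, 4), noise=5, random_state=0
)

model = SpectralBiclustering(
    n_clusters=(3, 4),
    method="bistochastic",    # default; 'log' is not usable on sparse data
    svd_method="randomized",  # default SVD solver recorded above
    n_init=10,
    random_state=0,
).fit(data)

print(model.row_labels_[:10])     # row cluster assignments
print(model.column_labels_[:10])  # column cluster assignments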
@@ -38023,7 +37716,7 @@ "qname": "sklearn.cluster._bicluster.SpectralCoclustering.__init__.init", "default_value": "'k-means++'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'k-means++', 'random', or ndarray of shape (n_clusters, n_features), default='k-means++'", "description": "Method for initialization of k-means algorithm; defaults to\n'k-means++'." @@ -38052,7 +37745,7 @@ "qname": "sklearn.cluster._bicluster.SpectralCoclustering.__init__.n_init", "default_value": "10", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=10", "description": "Number of random initializations that are tried with the\nk-means algorithm.\n\nIf mini-batch k-means is used, the best initialization is\nchosen and the algorithm runs once. Otherwise, the algorithm\nis run for each initialization and the best solution chosen." @@ -38068,7 +37761,7 @@ "qname": "sklearn.cluster._bicluster.SpectralCoclustering.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance, default=None", "description": "Used for randomizing the singular value decomposition and the k-means\ninitialization. Use an int to make the randomness deterministic.\nSee :term:`Glossary `." @@ -38089,7 +37782,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -38282,7 +37975,7 @@ "qname": "sklearn.cluster._birch.Birch.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -38295,7 +37988,7 @@ "qname": "sklearn.cluster._birch.Birch.__init__.threshold", "default_value": "0.5", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.5", "description": "The radius of the subcluster obtained by merging a new sample and the\nclosest subcluster should be lesser than the threshold. Otherwise a new\nsubcluster is started. Setting this value to be very low promotes\nsplitting and vice-versa." @@ -38311,7 +38004,7 @@ "qname": "sklearn.cluster._birch.Birch.__init__.branching_factor", "default_value": "50", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=50", "description": "Maximum number of CF subclusters in each node. If a new samples enters\nsuch that the number of subclusters exceed the branching_factor then\nthat node is split into two nodes with the subclusters redistributed\nin each. The parent subcluster of that node is removed and two new\nsubclusters are added as parents of the 2 split nodes." 
@@ -38327,7 +38020,7 @@ "qname": "sklearn.cluster._birch.Birch.__init__.n_clusters", "default_value": "3", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, instance of sklearn.cluster model, default=3", "description": "Number of clusters after the final clustering step, which treats the\nsubclusters from the leaves as new samples.\n\n- `None` : the final clustering step is not performed and the\n subclusters are returned as they are.\n\n- :mod:`sklearn.cluster` Estimator : If a model is provided, the model\n is fit treating the subclusters as new samples and the initial data\n is mapped to the label of the closest subcluster.\n\n- `int` : the model fit is :class:`AgglomerativeClustering` with\n `n_clusters` set to be equal to the int." @@ -38352,7 +38045,7 @@ "qname": "sklearn.cluster._birch.Birch.__init__.compute_labels", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether or not to compute labels for each fit." @@ -38368,7 +38061,7 @@ "qname": "sklearn.cluster._birch.Birch.__init__.copy", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether or not to make a copy of the given data. If set to False,\nthe initial data will be overwritten." @@ -38380,7 +38073,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -38536,31 +38229,6 @@ "description": "Global clustering for the subclusters obtained after fitting", "docstring": "\n Global clustering for the subclusters obtained after fitting\n " }, - { - "id": "sklearn/sklearn.cluster._birch/Birch/_more_tags", - "name": "_more_tags", - "qname": "sklearn.cluster._birch.Birch._more_tags", - "decorators": [], - "parameters": [ - { - "id": "sklearn/sklearn.cluster._birch/Birch/_more_tags/self", - "name": "self", - "qname": "sklearn.cluster._birch.Birch._more_tags.self", - "default_value": null, - "assigned_by": "IMPLICIT", - "is_public": false, - "docstring": { - "type": "", - "description": "" - }, - "type": {} - } - ], - "results": [], - "is_public": false, - "description": "", - "docstring": null - }, { "id": "sklearn/sklearn.cluster._birch/Birch/_predict", "name": "_predict", @@ -38611,7 +38279,7 @@ "qname": "sklearn.cluster._birch.Birch.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -38624,7 +38292,7 @@ "qname": "sklearn.cluster._birch.Birch.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Input data." @@ -38649,7 +38317,7 @@ "qname": "sklearn.cluster._birch.Birch.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present here for API consistency by convention." 
@@ -38661,7 +38329,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Build a CF Tree for the input data.", "docstring": "\n Build a CF Tree for the input data.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Input data.\n\n y : Ignored\n Not used, present here for API consistency by convention.\n\n Returns\n -------\n self\n Fitted estimator.\n " }, @@ -38677,7 +38345,7 @@ "qname": "sklearn.cluster._birch.Birch.fit_.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -38686,7 +38354,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -38702,7 +38370,7 @@ "qname": "sklearn.cluster._birch.Birch.partial_fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -38715,7 +38383,7 @@ "qname": "sklearn.cluster._birch.Birch.partial_fit.X", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features), default=None", "description": "Input data. If X is not provided, only the global clustering\nstep is done." @@ -38740,7 +38408,7 @@ "qname": "sklearn.cluster._birch.Birch.partial_fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present here for API consistency by convention." @@ -38752,7 +38420,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Online learning. Prevents rebuilding of CFTree from scratch.", "docstring": "\n Online learning. Prevents rebuilding of CFTree from scratch.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features), default=None\n Input data. If X is not provided, only the global clustering\n step is done.\n\n y : Ignored\n Not used, present here for API consistency by convention.\n\n Returns\n -------\n self\n Fitted estimator.\n " }, @@ -38768,7 +38436,7 @@ "qname": "sklearn.cluster._birch.Birch.partial_fit_.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -38777,7 +38445,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -38793,7 +38461,7 @@ "qname": "sklearn.cluster._birch.Birch.predict.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -38806,7 +38474,7 @@ "qname": "sklearn.cluster._birch.Birch.predict.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Input data." 
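The Birch entries above record threshold=0.5, branching_factor=50 and n_clusters=3 as defaults, and the partial_fit docstring describes online learning that updates the CF tree instead of rebuilding it. A minimal sketch of that incremental usage — batching, data, and printed attributes are illustrative only:

import numpy as np
from sklearn.cluster import Birch

rng = np.random.RandomState(0)
X = rng.rand(300, 2)  # illustrative data only

# Defaults recorded above: threshold=0.5, branching_factor=50, n_clusters=3.
birch = Birch(threshold=0.5, branching_factor=50, n_clusters=3)

# Feed the data in chunks; per the partial_fit docstring above, the CF tree
# is updated incrementally rather than rebuilt from scratch.
for chunk in np.array_split(X, 3):
    birch.partial_fit(chunk)

labels = birch.predict(X)               # labels via the closest subcluster
print(birch.subcluster_centers_.shape)  # centroids of the CF-tree leaves
print(np.bincount(labels))              # samples per final cluster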
@@ -38827,7 +38495,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict data using the ``centroids_`` of subclusters.\n\nAvoid computation of the row norms of X.", "docstring": "\n Predict data using the ``centroids_`` of subclusters.\n\n Avoid computation of the row norms of X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Input data.\n\n Returns\n -------\n labels : ndarray of shape(n_samples,)\n Labelled data.\n " }, @@ -38843,7 +38511,7 @@ "qname": "sklearn.cluster._birch.Birch.transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -38856,7 +38524,7 @@ "qname": "sklearn.cluster._birch.Birch.transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Input data." @@ -38877,7 +38545,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Transform X into subcluster centroids dimension.\n\nEach dimension represents the distance from the sample point to each\ncluster centroid.", "docstring": "\n Transform X into subcluster centroids dimension.\n\n Each dimension represents the distance from the sample point to each\n cluster centroid.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Input data.\n\n Returns\n -------\n X_trans : {array-like, sparse matrix} of shape (n_samples, n_clusters)\n Transformed data.\n " }, @@ -38963,19 +38631,6 @@ "kind": "NamedType", "name": "int" } - }, - { - "id": "sklearn/sklearn.cluster._birch/_CFNode/__init__/dtype", - "name": "dtype", - "qname": "sklearn.cluster._birch._CFNode.__init__.dtype", - "default_value": null, - "assigned_by": "NAME_ONLY", - "is_public": false, - "docstring": { - "type": "", - "description": "" - }, - "type": {} } ], "results": [], @@ -39366,7 +39021,7 @@ "qname": "sklearn.cluster._bisect_k_means.BisectingKMeans.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -39379,7 +39034,7 @@ "qname": "sklearn.cluster._bisect_k_means.BisectingKMeans.__init__.n_clusters", "default_value": "8", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=8", "description": "The number of clusters to form as well as the number of\ncentroids to generate." @@ -39395,7 +39050,7 @@ "qname": "sklearn.cluster._bisect_k_means.BisectingKMeans.__init__.init", "default_value": "'random'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'k-means++', 'random'} or callable, default='random'", "description": "Method for initialization:\n\n'k-means++' : selects initial cluster centers for k-mean\nclustering in a smart way to speed up convergence. See section\nNotes in k_init for more details.\n\n'random': choose `n_clusters` observations (rows) at random from data\nfor the initial centroids.\n\nIf a callable is passed, it should take arguments X, n_clusters and a\nrandom state and return an initialization." 
@@ -39420,7 +39075,7 @@ "qname": "sklearn.cluster._bisect_k_means.BisectingKMeans.__init__.n_init", "default_value": "1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=1", "description": "Number of time the inner k-means algorithm will be run with different\ncentroid seeds in each bisection.\nThat will result producing for each bisection best output of n_init\nconsecutive runs in terms of inertia." @@ -39436,7 +39091,7 @@ "qname": "sklearn.cluster._bisect_k_means.BisectingKMeans.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Determines random number generation for centroid initialization\nin inner K-Means. Use an int to make the randomness deterministic.\nSee :term:`Glossary `." @@ -39465,7 +39120,7 @@ "qname": "sklearn.cluster._bisect_k_means.BisectingKMeans.__init__.max_iter", "default_value": "300", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=300", "description": "Maximum number of iterations of the inner k-means algorithm at each\nbisection." @@ -39481,7 +39136,7 @@ "qname": "sklearn.cluster._bisect_k_means.BisectingKMeans.__init__.verbose", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=0", "description": "Verbosity mode." @@ -39497,7 +39152,7 @@ "qname": "sklearn.cluster._bisect_k_means.BisectingKMeans.__init__.tol", "default_value": "0.0001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-4", "description": "Relative tolerance with regards to Frobenius norm of the difference\nin the cluster centers of two consecutive iterations to declare\nconvergence. Used in inner k-means algorithm at each bisection to pick\nbest possible clusters." @@ -39513,7 +39168,7 @@ "qname": "sklearn.cluster._bisect_k_means.BisectingKMeans.__init__.copy_x", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "When pre-computing distances it is more numerically accurate to center\nthe data first. If copy_x is True (default), then the original data is\nnot modified. If False, the original data is modified, and put back\nbefore the function returns, but small numerical differences may be\nintroduced by subtracting and then adding the data mean. Note that if\nthe original data is not C-contiguous, a copy will be made even if\ncopy_x is False. If the original data is sparse, but not in CSR format,\na copy will be made even if copy_x is False." @@ -39529,7 +39184,7 @@ "qname": "sklearn.cluster._bisect_k_means.BisectingKMeans.__init__.algorithm", "default_value": "'lloyd'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{\"lloyd\", \"elkan\"}, default=\"lloyd\"", "description": "Inner K-means algorithm used in bisection.\nThe classical EM-style algorithm is `\"lloyd\"`.\nThe `\"elkan\"` variation can be more efficient on some datasets with\nwell-defined clusters, by using the triangle inequality. However it's\nmore memory intensive due to the allocation of an extra array of shape\n`(n_samples, n_clusters)`." 
@@ -39545,19 +39200,19 @@ "qname": "sklearn.cluster._bisect_k_means.BisectingKMeans.__init__.bisecting_strategy", "default_value": "'biggest_inertia'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{\"biggest_inertia\", \"largest_cluster\"}, default=\"biggest_inertia\"", "description": "Defines how bisection should be performed:\n\n - \"biggest_inertia\" means that BisectingKMeans will always check\n all calculated cluster for cluster with biggest SSE\n (Sum of squared errors) and bisect it. This approach concentrates on\n precision, but may be costly in terms of execution time (especially for\n larger amount of data points).\n\n - \"largest_cluster\" - BisectingKMeans will always split cluster with\n largest amount of points assigned to it from all clusters\n previously calculated. That should work faster than picking by SSE\n ('biggest_inertia') and may produce similar results in most cases." }, "type": { "kind": "EnumType", - "values": ["biggest_inertia", "largest_cluster"] + "values": ["largest_cluster", "biggest_inertia"] } } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -39659,6 +39314,44 @@ "description": "Split a cluster into 2 subsclusters.", "docstring": "Split a cluster into 2 subsclusters.\n\n Parameters\n ----------\n X : {ndarray, csr_matrix} of shape (n_samples, n_features)\n Training instances to cluster.\n\n x_squared_norms : ndarray of shape (n_samples,)\n Squared euclidean norm of each data point.\n\n sample_weight : ndarray of shape (n_samples,)\n The weights for each observation in X.\n\n cluster_to_bisect : _BisectingTree node object\n The cluster node to split.\n " }, + { + "id": "sklearn/sklearn.cluster._bisect_k_means/BisectingKMeans/_check_params", + "name": "_check_params", + "qname": "sklearn.cluster._bisect_k_means.BisectingKMeans._check_params", + "decorators": [], + "parameters": [ + { + "id": "sklearn/sklearn.cluster._bisect_k_means/BisectingKMeans/_check_params/self", + "name": "self", + "qname": "sklearn.cluster._bisect_k_means.BisectingKMeans._check_params.self", + "default_value": null, + "assigned_by": "IMPLICIT", + "is_public": false, + "docstring": { + "type": "", + "description": "" + }, + "type": {} + }, + { + "id": "sklearn/sklearn.cluster._bisect_k_means/BisectingKMeans/_check_params/X", + "name": "X", + "qname": "sklearn.cluster._bisect_k_means.BisectingKMeans._check_params.X", + "default_value": null, + "assigned_by": "POSITION_OR_NAME", + "is_public": false, + "docstring": { + "type": "", + "description": "" + }, + "type": {} + } + ], + "results": [], + "is_public": false, + "description": "", + "docstring": null + }, { "id": "sklearn/sklearn.cluster._bisect_k_means/BisectingKMeans/_inertia_per_cluster", "name": "_inertia_per_cluster", @@ -39930,7 +39623,7 @@ "qname": "sklearn.cluster._bisect_k_means.BisectingKMeans.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -39943,7 +39636,7 @@ "qname": "sklearn.cluster._bisect_k_means.BisectingKMeans.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Training instances to cluster.\n\n.. note:: The data will be converted to C ordering,\n which will cause a memory copy\n if the given data is not C-contiguous." 
@@ -39968,7 +39661,7 @@ "qname": "sklearn.cluster._bisect_k_means.BisectingKMeans.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present here for API consistency by convention." @@ -39984,7 +39677,7 @@ "qname": "sklearn.cluster._bisect_k_means.BisectingKMeans.fit.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,), default=None", "description": "The weights for each observation in X. If None, all observations\nare assigned equal weight." @@ -39996,7 +39689,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Compute bisecting k-means clustering.", "docstring": "Compute bisecting k-means clustering.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n\n Training instances to cluster.\n\n .. note:: The data will be converted to C ordering,\n which will cause a memory copy\n if the given data is not C-contiguous.\n\n y : Ignored\n Not used, present here for API consistency by convention.\n\n sample_weight : array-like of shape (n_samples,), default=None\n The weights for each observation in X. If None, all observations\n are assigned equal weight.\n\n Returns\n -------\n self\n Fitted estimator.\n " }, @@ -40012,7 +39705,7 @@ "qname": "sklearn.cluster._bisect_k_means.BisectingKMeans.predict.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -40025,7 +39718,7 @@ "qname": "sklearn.cluster._bisect_k_means.BisectingKMeans.predict.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "New data to predict." 
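For reference, a minimal usage sketch consistent with the BisectingKMeans entries above; the parameter names and defaults come from the annotations, while the toy data and the scikit-learn ~1.1 environment are assumptions:

import numpy as np
from sklearn.cluster import BisectingKMeans

X = np.random.RandomState(0).rand(100, 2)        # synthetic toy data (assumption)

model = BisectingKMeans(
    n_clusters=4,
    n_init=1,                                    # inner k-means runs per bisection, as annotated
    random_state=0,
    max_iter=300,
    tol=1e-4,
    algorithm="lloyd",
    bisecting_strategy="biggest_inertia",        # or "largest_cluster"
)
model.fit(X)                                     # fit(X, y=None, sample_weight=None)
labels = model.predict(X[:5])                    # walks the bisecting tree to the closest leaf cluster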
@@ -40046,7 +39739,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict which cluster each sample in X belongs to.\n\nPrediction is made by going down the hierarchical tree\nin searching of closest leaf cluster.\n\nIn the vector quantization literature, `cluster_centers_` is called\nthe code book and each value returned by `predict` is the index of\nthe closest code in the code book.", "docstring": "Predict which cluster each sample in X belongs to.\n\n Prediction is made by going down the hierarchical tree\n in searching of closest leaf cluster.\n\n In the vector quantization literature, `cluster_centers_` is called\n the code book and each value returned by `predict` is the index of\n the closest code in the code book.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n New data to predict.\n\n Returns\n -------\n labels : ndarray of shape (n_samples,)\n Index of the cluster each sample belongs to.\n " }, @@ -40240,7 +39933,7 @@ "qname": "sklearn.cluster._dbscan.DBSCAN.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -40253,7 +39946,7 @@ "qname": "sklearn.cluster._dbscan.DBSCAN.__init__.eps", "default_value": "0.5", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.5", "description": "The maximum distance between two samples for one to be considered\nas in the neighborhood of the other. This is not a maximum bound\non the distances of points within a cluster. This is the most\nimportant DBSCAN parameter to choose appropriately for your data set\nand distance function." @@ -40269,7 +39962,7 @@ "qname": "sklearn.cluster._dbscan.DBSCAN.__init__.min_samples", "default_value": "5", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=5", "description": "The number of samples (or total weight) in a neighborhood for a point\nto be considered as a core point. This includes the point itself." @@ -40285,7 +39978,7 @@ "qname": "sklearn.cluster._dbscan.DBSCAN.__init__.metric", "default_value": "'euclidean'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str, or callable, default='euclidean'", "description": "The metric to use when calculating distance between instances in a\nfeature array. If metric is a string or callable, it must be one of\nthe options allowed by :func:`sklearn.metrics.pairwise_distances` for\nits metric parameter.\nIf metric is \"precomputed\", X is assumed to be a distance matrix and\nmust be square. X may be a :term:`sparse graph`, in which\ncase only \"nonzero\" elements may be considered neighbors for DBSCAN.\n\n.. versionadded:: 0.17\n metric *precomputed* to accept precomputed sparse matrix." @@ -40310,7 +40003,7 @@ "qname": "sklearn.cluster._dbscan.DBSCAN.__init__.metric_params", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "dict, default=None", "description": "Additional keyword arguments for the metric function.\n\n.. 
versionadded:: 0.19" @@ -40326,14 +40019,14 @@ "qname": "sklearn.cluster._dbscan.DBSCAN.__init__.algorithm", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'", "description": "The algorithm to be used by the NearestNeighbors module\nto compute pointwise distances and find nearest neighbors.\nSee NearestNeighbors module documentation for details." }, "type": { "kind": "EnumType", - "values": ["auto", "kd_tree", "ball_tree", "brute"] + "values": ["auto", "kd_tree", "brute", "ball_tree"] } }, { @@ -40342,7 +40035,7 @@ "qname": "sklearn.cluster._dbscan.DBSCAN.__init__.leaf_size", "default_value": "30", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=30", "description": "Leaf size passed to BallTree or cKDTree. This can affect the speed\nof the construction and query, as well as the memory required\nto store the tree. The optimal value depends\non the nature of the problem." @@ -40358,7 +40051,7 @@ "qname": "sklearn.cluster._dbscan.DBSCAN.__init__.p", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=None", "description": "The power of the Minkowski metric to be used to calculate distance\nbetween points. If None, then ``p=2`` (equivalent to the Euclidean\ndistance)." @@ -40374,7 +40067,7 @@ "qname": "sklearn.cluster._dbscan.DBSCAN.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "The number of parallel jobs to run.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details." @@ -40386,7 +40079,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -40427,7 +40120,7 @@ "qname": "sklearn.cluster._dbscan.DBSCAN.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -40440,7 +40133,7 @@ "qname": "sklearn.cluster._dbscan.DBSCAN.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features), or (n_samples, n_samples)", "description": "Training instances to cluster, or distances between instances if\n``metric='precomputed'``. If a sparse matrix is provided, it will\nbe converted into a sparse ``csr_matrix``." @@ -40469,7 +40162,7 @@ "qname": "sklearn.cluster._dbscan.DBSCAN.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present here for API consistency by convention." @@ -40485,7 +40178,7 @@ "qname": "sklearn.cluster._dbscan.DBSCAN.fit.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,), default=None", "description": "Weight of each sample, such that a sample with a weight of at least\n``min_samples`` is by itself a core sample; a sample with a\nnegative weight may inhibit its eps-neighbor from being core.\nNote that weights are absolute, and default to 1." 
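A short sketch of the DBSCAN surface described above; the keyword names follow the annotated signature, while the data and the chosen parameter values are illustrative assumptions:

import numpy as np
from sklearn.cluster import DBSCAN

X = np.random.RandomState(0).rand(200, 2)        # synthetic toy data (assumption)
w = np.ones(len(X))                              # per-sample weights default to 1

db = DBSCAN(eps=0.5, min_samples=5, metric="euclidean",
            algorithm="auto", leaf_size=30, p=None, n_jobs=None)
db.fit(X, sample_weight=w)                       # fit(X, y=None, sample_weight=None)
labels = db.labels_                              # noisy samples are labelled -1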
@@ -40497,7 +40190,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Perform DBSCAN clustering from features, or distance matrix.", "docstring": "Perform DBSCAN clustering from features, or distance matrix.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features), or (n_samples, n_samples)\n Training instances to cluster, or distances between instances if\n ``metric='precomputed'``. If a sparse matrix is provided, it will\n be converted into a sparse ``csr_matrix``.\n\n y : Ignored\n Not used, present here for API consistency by convention.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Weight of each sample, such that a sample with a weight of at least\n ``min_samples`` is by itself a core sample; a sample with a\n negative weight may inhibit its eps-neighbor from being core.\n Note that weights are absolute, and default to 1.\n\n Returns\n -------\n self : object\n Returns a fitted instance of self.\n " }, @@ -40513,7 +40206,7 @@ "qname": "sklearn.cluster._dbscan.DBSCAN.fit_predict.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -40526,7 +40219,7 @@ "qname": "sklearn.cluster._dbscan.DBSCAN.fit_predict.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features), or (n_samples, n_samples)", "description": "Training instances to cluster, or distances between instances if\n``metric='precomputed'``. If a sparse matrix is provided, it will\nbe converted into a sparse ``csr_matrix``." @@ -40555,7 +40248,7 @@ "qname": "sklearn.cluster._dbscan.DBSCAN.fit_predict.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present here for API consistency by convention." @@ -40571,7 +40264,7 @@ "qname": "sklearn.cluster._dbscan.DBSCAN.fit_predict.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,), default=None", "description": "Weight of each sample, such that a sample with a weight of at least\n``min_samples`` is by itself a core sample; a sample with a\nnegative weight may inhibit its eps-neighbor from being core.\nNote that weights are absolute, and default to 1." @@ -40583,7 +40276,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Compute clusters from a data or distance matrix and predict labels.", "docstring": "Compute clusters from a data or distance matrix and predict labels.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features), or (n_samples, n_samples)\n Training instances to cluster, or distances between instances if\n ``metric='precomputed'``. 
If a sparse matrix is provided, it will\n be converted into a sparse ``csr_matrix``.\n\n y : Ignored\n Not used, present here for API consistency by convention.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Weight of each sample, such that a sample with a weight of at least\n ``min_samples`` is by itself a core sample; a sample with a\n negative weight may inhibit its eps-neighbor from being core.\n Note that weights are absolute, and default to 1.\n\n Returns\n -------\n labels : ndarray of shape (n_samples,)\n Cluster labels. Noisy samples are given the label -1.\n " }, @@ -40704,7 +40397,7 @@ }, "type": { "kind": "EnumType", - "values": ["auto", "kd_tree", "ball_tree", "brute"] + "values": ["auto", "kd_tree", "brute", "ball_tree"] } }, { @@ -40871,7 +40564,7 @@ "qname": "sklearn.cluster._kmeans.KMeans.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -40884,7 +40577,7 @@ "qname": "sklearn.cluster._kmeans.KMeans.__init__.n_clusters", "default_value": "8", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=8", "description": "The number of clusters to form as well as the number of\ncentroids to generate." @@ -40900,7 +40593,7 @@ "qname": "sklearn.cluster._kmeans.KMeans.__init__.init", "default_value": "'k-means++'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'k-means++', 'random'}, callable or array-like of shape (n_clusters, n_features), default='k-means++'", "description": "Method for initialization:\n\n'k-means++' : selects initial cluster centers for k-mean\nclustering in a smart way to speed up convergence. See section\nNotes in k_init for more details.\n\n'random': choose `n_clusters` observations (rows) at random from data\nfor the initial centroids.\n\nIf an array is passed, it should be of shape (n_clusters, n_features)\nand gives the initial centers.\n\nIf a callable is passed, it should take arguments X, n_clusters and a\nrandom state and return an initialization." @@ -40927,25 +40620,16 @@ "id": "sklearn/sklearn.cluster._kmeans/KMeans/__init__/n_init", "name": "n_init", "qname": "sklearn.cluster._kmeans.KMeans.__init__.n_init", - "default_value": "'warn'", + "default_value": "10", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { - "type": "'auto' or int, default=10", - "description": "Number of time the k-means algorithm will be run with different\ncentroid seeds. The final results will be the best output of\nn_init consecutive runs in terms of inertia.\n\nWhen `n_init='auto'`, the number of runs will be 10 if using\n`init='random'`, and 1 if using `init='kmeans++'`.\n\n.. versionadded:: 1.2\n Added 'auto' option for `n_init`.\n\n.. versionchanged:: 1.4\n Default value for `n_init` will change from 10 to `'auto'` in version 1.4." + "type": "int, default=10", + "description": "Number of time the k-means algorithm will be run with different\ncentroid seeds. The final results will be the best output of\nn_init consecutive runs in terms of inertia." 
}, "type": { - "kind": "UnionType", - "types": [ - { - "kind": "NamedType", - "name": "'auto'" - }, - { - "kind": "NamedType", - "name": "int" - } - ] + "kind": "NamedType", + "name": "int" } }, { @@ -40954,7 +40638,7 @@ "qname": "sklearn.cluster._kmeans.KMeans.__init__.max_iter", "default_value": "300", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=300", "description": "Maximum number of iterations of the k-means algorithm for a\nsingle run." @@ -40970,7 +40654,7 @@ "qname": "sklearn.cluster._kmeans.KMeans.__init__.tol", "default_value": "0.0001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-4", "description": "Relative tolerance with regards to Frobenius norm of the difference\nin the cluster centers of two consecutive iterations to declare\nconvergence." @@ -40986,7 +40670,7 @@ "qname": "sklearn.cluster._kmeans.KMeans.__init__.verbose", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=0", "description": "Verbosity mode." @@ -41002,7 +40686,7 @@ "qname": "sklearn.cluster._kmeans.KMeans.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Determines random number generation for centroid initialization. Use\nan int to make the randomness deterministic.\nSee :term:`Glossary `." @@ -41031,7 +40715,7 @@ "qname": "sklearn.cluster._kmeans.KMeans.__init__.copy_x", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "When pre-computing distances it is more numerically accurate to center\nthe data first. If copy_x is True (default), then the original data is\nnot modified. If False, the original data is modified, and put back\nbefore the function returns, but small numerical differences may be\nintroduced by subtracting and then adding the data mean. Note that if\nthe original data is not C-contiguous, a copy will be made even if\ncopy_x is False. If the original data is sparse, but not in CSR format,\na copy will be made even if copy_x is False." @@ -41047,32 +40731,32 @@ "qname": "sklearn.cluster._kmeans.KMeans.__init__.algorithm", "default_value": "'lloyd'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{\"lloyd\", \"elkan\", \"auto\", \"full\"}, default=\"lloyd\"", "description": "K-means algorithm to use. The classical EM-style algorithm is `\"lloyd\"`.\nThe `\"elkan\"` variation can be more efficient on some datasets with\nwell-defined clusters, by using the triangle inequality. However it's\nmore memory intensive due to the allocation of an extra array of shape\n`(n_samples, n_clusters)`.\n\n`\"auto\"` and `\"full\"` are deprecated and they will be removed in\nScikit-Learn 1.3. They are both aliases for `\"lloyd\"`.\n\n.. versionchanged:: 0.18\n Added Elkan algorithm\n\n.. versionchanged:: 1.1\n Renamed \"full\" to \"lloyd\", and deprecated \"auto\" and \"full\".\n Changed \"auto\" to use \"lloyd\" instead of \"elkan\"." 
}, "type": { "kind": "EnumType", - "values": ["full", "lloyd", "elkan", "auto"] + "values": ["auto", "lloyd", "elkan", "full"] } } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, { - "id": "sklearn/sklearn.cluster._kmeans/KMeans/_check_params_vs_input", - "name": "_check_params_vs_input", - "qname": "sklearn.cluster._kmeans.KMeans._check_params_vs_input", + "id": "sklearn/sklearn.cluster._kmeans/KMeans/_check_params", + "name": "_check_params", + "qname": "sklearn.cluster._kmeans.KMeans._check_params", "decorators": [], "parameters": [ { - "id": "sklearn/sklearn.cluster._kmeans/KMeans/_check_params_vs_input/self", + "id": "sklearn/sklearn.cluster._kmeans/KMeans/_check_params/self", "name": "self", - "qname": "sklearn.cluster._kmeans.KMeans._check_params_vs_input.self", + "qname": "sklearn.cluster._kmeans.KMeans._check_params.self", "default_value": null, "assigned_by": "IMPLICIT", "is_public": false, @@ -41083,9 +40767,9 @@ "type": {} }, { - "id": "sklearn/sklearn.cluster._kmeans/KMeans/_check_params_vs_input/X", + "id": "sklearn/sklearn.cluster._kmeans/KMeans/_check_params/X", "name": "X", - "qname": "sklearn.cluster._kmeans.KMeans._check_params_vs_input.X", + "qname": "sklearn.cluster._kmeans.KMeans._check_params.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", "is_public": false, @@ -41151,7 +40835,7 @@ "qname": "sklearn.cluster._kmeans.KMeans.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -41164,7 +40848,7 @@ "qname": "sklearn.cluster._kmeans.KMeans.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Training instances to cluster. It must be noted that the data\nwill be converted to C ordering, which will cause a memory\ncopy if the given data is not C-contiguous.\nIf a sparse matrix is passed, a copy will be made if it's not in\nCSR format." @@ -41189,7 +40873,7 @@ "qname": "sklearn.cluster._kmeans.KMeans.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present here for API consistency by convention." @@ -41205,7 +40889,7 @@ "qname": "sklearn.cluster._kmeans.KMeans.fit.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,), default=None", "description": "The weights for each observation in X. If None, all observations\nare assigned equal weight.\n\n.. versionadded:: 0.20" @@ -41217,7 +40901,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Compute k-means clustering.", "docstring": "Compute k-means clustering.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training instances to cluster. It must be noted that the data\n will be converted to C ordering, which will cause a memory\n copy if the given data is not C-contiguous.\n If a sparse matrix is passed, a copy will be made if it's not in\n CSR format.\n\n y : Ignored\n Not used, present here for API consistency by convention.\n\n sample_weight : array-like of shape (n_samples,), default=None\n The weights for each observation in X. 
If None, all observations\n are assigned equal weight.\n\n .. versionadded:: 0.20\n\n Returns\n -------\n self : object\n Fitted estimator.\n " }, @@ -41233,7 +40917,7 @@ "qname": "sklearn.cluster._kmeans.MiniBatchKMeans.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -41246,7 +40930,7 @@ "qname": "sklearn.cluster._kmeans.MiniBatchKMeans.__init__.n_clusters", "default_value": "8", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=8", "description": "The number of clusters to form as well as the number of\ncentroids to generate." @@ -41262,7 +40946,7 @@ "qname": "sklearn.cluster._kmeans.MiniBatchKMeans.__init__.init", "default_value": "'k-means++'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'k-means++', 'random'}, callable or array-like of shape (n_clusters, n_features), default='k-means++'", "description": "Method for initialization:\n\n'k-means++' : selects initial cluster centers for k-mean\nclustering in a smart way to speed up convergence. See section\nNotes in k_init for more details.\n\n'random': choose `n_clusters` observations (rows) at random from data\nfor the initial centroids.\n\nIf an array is passed, it should be of shape (n_clusters, n_features)\nand gives the initial centers.\n\nIf a callable is passed, it should take arguments X, n_clusters and a\nrandom state and return an initialization." @@ -41291,7 +40975,7 @@ "qname": "sklearn.cluster._kmeans.MiniBatchKMeans.__init__.max_iter", "default_value": "100", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=100", "description": "Maximum number of iterations over the complete dataset before\nstopping independently of any early stopping criterion heuristics." @@ -41307,7 +40991,7 @@ "qname": "sklearn.cluster._kmeans.MiniBatchKMeans.__init__.batch_size", "default_value": "1024", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=1024", "description": "Size of the mini batches.\nFor faster computations, you can set the ``batch_size`` greater than\n256 * number of cores to enable parallelism on all cores.\n\n.. versionchanged:: 1.0\n `batch_size` default changed from 100 to 1024." @@ -41323,7 +41007,7 @@ "qname": "sklearn.cluster._kmeans.MiniBatchKMeans.__init__.verbose", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=0", "description": "Verbosity mode." @@ -41339,7 +41023,7 @@ "qname": "sklearn.cluster._kmeans.MiniBatchKMeans.__init__.compute_labels", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Compute label assignment and inertia for the complete dataset\nonce the minibatch optimization has converged in fit." @@ -41355,7 +41039,7 @@ "qname": "sklearn.cluster._kmeans.MiniBatchKMeans.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Determines random number generation for centroid initialization and\nrandom reassignment. Use an int to make the randomness deterministic.\nSee :term:`Glossary `." 
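The KMeans entries above use the plain integer `n_init=10` default, so a call consistent with these annotations looks like the sketch below; the data and cluster count are assumptions:

import numpy as np
from sklearn.cluster import KMeans

X = np.random.RandomState(0).rand(150, 3)        # synthetic toy data (assumption)

km = KMeans(n_clusters=8, init="k-means++", n_init=10,   # explicit int, as annotated
            max_iter=300, tol=1e-4, algorithm="lloyd", random_state=0)
km.fit(X)                                        # fit(X, y=None, sample_weight=None)
print(km.cluster_centers_.shape, km.inertia_)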
@@ -41384,7 +41068,7 @@ "qname": "sklearn.cluster._kmeans.MiniBatchKMeans.__init__.tol", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.0", "description": "Control early stopping based on the relative center changes as\nmeasured by a smoothed, variance-normalized of the mean center\nsquared position changes. This early stopping heuristics is\ncloser to the one used for the batch variant of the algorithms\nbut induces a slight computational and memory overhead over the\ninertia heuristic.\n\nTo disable convergence detection based on normalized center\nchange, set tol to 0.0 (default)." @@ -41400,7 +41084,7 @@ "qname": "sklearn.cluster._kmeans.MiniBatchKMeans.__init__.max_no_improvement", "default_value": "10", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=10", "description": "Control early stopping based on the consecutive number of mini\nbatches that does not yield an improvement on the smoothed inertia.\n\nTo disable convergence detection based on inertia, set\nmax_no_improvement to None." @@ -41416,7 +41100,7 @@ "qname": "sklearn.cluster._kmeans.MiniBatchKMeans.__init__.init_size", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Number of samples to randomly sample for speeding up the\ninitialization (sometimes at the expense of accuracy): the\nonly algorithm is initialized by running a batch KMeans on a\nrandom subset of the data. This needs to be larger than n_clusters.\n\nIf `None`, the heuristic is `init_size = 3 * batch_size` if\n`3 * batch_size < n_clusters`, else `init_size = 3 * n_clusters`." @@ -41430,25 +41114,16 @@ "id": "sklearn/sklearn.cluster._kmeans/MiniBatchKMeans/__init__/n_init", "name": "n_init", "qname": "sklearn.cluster._kmeans.MiniBatchKMeans.__init__.n_init", - "default_value": "'warn'", + "default_value": "3", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { - "type": "'auto' or int, default=3", - "description": "Number of random initializations that are tried.\nIn contrast to KMeans, the algorithm is only run once, using the\nbest of the ``n_init`` initializations as measured by inertia.\n\nWhen `n_init='auto'`, the number of runs will be 3 if using\n`init='random'`, and 1 if using `init='kmeans++'`.\n\n.. versionadded:: 1.2\n Added 'auto' option for `n_init`.\n\n.. versionchanged:: 1.4\n Default value for `n_init` will change from 3 to `'auto'` in version 1.4." + "type": "int, default=3", + "description": "Number of random initializations that are tried.\nIn contrast to KMeans, the algorithm is only run once, using the\nbest of the ``n_init`` initializations as measured by inertia." }, "type": { - "kind": "UnionType", - "types": [ - { - "kind": "NamedType", - "name": "'auto'" - }, - { - "kind": "NamedType", - "name": "int" - } - ] + "kind": "NamedType", + "name": "int" } }, { @@ -41457,7 +41132,7 @@ "qname": "sklearn.cluster._kmeans.MiniBatchKMeans.__init__.reassignment_ratio", "default_value": "0.01", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.01", "description": "Control the fraction of the maximum number of counts for a center to\nbe reassigned. 
A higher value means that low count centers are more\neasily reassigned, which means that the model will take longer to\nconverge, but should converge in a better clustering. However, too high\na value may cause convergence issues, especially with a small batch\nsize." @@ -41469,20 +41144,20 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, { - "id": "sklearn/sklearn.cluster._kmeans/MiniBatchKMeans/_check_params_vs_input", - "name": "_check_params_vs_input", - "qname": "sklearn.cluster._kmeans.MiniBatchKMeans._check_params_vs_input", + "id": "sklearn/sklearn.cluster._kmeans/MiniBatchKMeans/_check_params", + "name": "_check_params", + "qname": "sklearn.cluster._kmeans.MiniBatchKMeans._check_params", "decorators": [], "parameters": [ { - "id": "sklearn/sklearn.cluster._kmeans/MiniBatchKMeans/_check_params_vs_input/self", + "id": "sklearn/sklearn.cluster._kmeans/MiniBatchKMeans/_check_params/self", "name": "self", - "qname": "sklearn.cluster._kmeans.MiniBatchKMeans._check_params_vs_input.self", + "qname": "sklearn.cluster._kmeans.MiniBatchKMeans._check_params.self", "default_value": null, "assigned_by": "IMPLICIT", "is_public": false, @@ -41493,9 +41168,9 @@ "type": {} }, { - "id": "sklearn/sklearn.cluster._kmeans/MiniBatchKMeans/_check_params_vs_input/X", + "id": "sklearn/sklearn.cluster._kmeans/MiniBatchKMeans/_check_params/X", "name": "X", - "qname": "sklearn.cluster._kmeans.MiniBatchKMeans._check_params_vs_input.X", + "qname": "sklearn.cluster._kmeans.MiniBatchKMeans._check_params.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", "is_public": false, @@ -41676,7 +41351,7 @@ "qname": "sklearn.cluster._kmeans.MiniBatchKMeans.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -41689,7 +41364,7 @@ "qname": "sklearn.cluster._kmeans.MiniBatchKMeans.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Training instances to cluster. It must be noted that the data\nwill be converted to C ordering, which will cause a memory copy\nif the given data is not C-contiguous.\nIf a sparse matrix is passed, a copy will be made if it's not in\nCSR format." @@ -41714,7 +41389,7 @@ "qname": "sklearn.cluster._kmeans.MiniBatchKMeans.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present here for API consistency by convention." @@ -41730,7 +41405,7 @@ "qname": "sklearn.cluster._kmeans.MiniBatchKMeans.fit.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,), default=None", "description": "The weights for each observation in X. If None, all observations\nare assigned equal weight.\n\n.. versionadded:: 0.20" @@ -41742,7 +41417,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Compute the centroids on X by chunking it into mini-batches.", "docstring": "Compute the centroids on X by chunking it into mini-batches.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training instances to cluster. 
It must be noted that the data\n will be converted to C ordering, which will cause a memory copy\n if the given data is not C-contiguous.\n If a sparse matrix is passed, a copy will be made if it's not in\n CSR format.\n\n y : Ignored\n Not used, present here for API consistency by convention.\n\n sample_weight : array-like of shape (n_samples,), default=None\n The weights for each observation in X. If None, all observations\n are assigned equal weight.\n\n .. versionadded:: 0.20\n\n Returns\n -------\n self : object\n Fitted estimator.\n " }, @@ -41758,7 +41433,7 @@ "qname": "sklearn.cluster._kmeans.MiniBatchKMeans.partial_fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -41771,7 +41446,7 @@ "qname": "sklearn.cluster._kmeans.MiniBatchKMeans.partial_fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Training instances to cluster. It must be noted that the data\nwill be converted to C ordering, which will cause a memory copy\nif the given data is not C-contiguous.\nIf a sparse matrix is passed, a copy will be made if it's not in\nCSR format." @@ -41796,7 +41471,7 @@ "qname": "sklearn.cluster._kmeans.MiniBatchKMeans.partial_fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present here for API consistency by convention." @@ -41812,7 +41487,7 @@ "qname": "sklearn.cluster._kmeans.MiniBatchKMeans.partial_fit.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,), default=None", "description": "The weights for each observation in X. If None, all observations\nare assigned equal weight." @@ -41824,7 +41499,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Update k means estimate on a single mini-batch X.", "docstring": "Update k means estimate on a single mini-batch X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training instances to cluster. It must be noted that the data\n will be converted to C ordering, which will cause a memory copy\n if the given data is not C-contiguous.\n If a sparse matrix is passed, a copy will be made if it's not in\n CSR format.\n\n y : Ignored\n Not used, present here for API consistency by convention.\n\n sample_weight : array-like of shape (n_samples,), default=None\n The weights for each observation in X. 
If None, all observations\n are assigned equal weight.\n\n Returns\n -------\n self : object\n Return updated estimator.\n " }, @@ -41996,15 +41671,15 @@ "docstring": "Check when vcomp and mkl are both present" }, { - "id": "sklearn/sklearn.cluster._kmeans/_BaseKMeans/_check_params_vs_input", - "name": "_check_params_vs_input", - "qname": "sklearn.cluster._kmeans._BaseKMeans._check_params_vs_input", + "id": "sklearn/sklearn.cluster._kmeans/_BaseKMeans/_check_params", + "name": "_check_params", + "qname": "sklearn.cluster._kmeans._BaseKMeans._check_params", "decorators": [], "parameters": [ { - "id": "sklearn/sklearn.cluster._kmeans/_BaseKMeans/_check_params_vs_input/self", + "id": "sklearn/sklearn.cluster._kmeans/_BaseKMeans/_check_params/self", "name": "self", - "qname": "sklearn.cluster._kmeans._BaseKMeans._check_params_vs_input.self", + "qname": "sklearn.cluster._kmeans._BaseKMeans._check_params.self", "default_value": null, "assigned_by": "IMPLICIT", "is_public": false, @@ -42015,9 +41690,9 @@ "type": {} }, { - "id": "sklearn/sklearn.cluster._kmeans/_BaseKMeans/_check_params_vs_input/X", + "id": "sklearn/sklearn.cluster._kmeans/_BaseKMeans/_check_params/X", "name": "X", - "qname": "sklearn.cluster._kmeans._BaseKMeans._check_params_vs_input.X", + "qname": "sklearn.cluster._kmeans._BaseKMeans._check_params.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", "is_public": false, @@ -42026,19 +41701,6 @@ "description": "" }, "type": {} - }, - { - "id": "sklearn/sklearn.cluster._kmeans/_BaseKMeans/_check_params_vs_input/default_n_init", - "name": "default_n_init", - "qname": "sklearn.cluster._kmeans._BaseKMeans._check_params_vs_input.default_n_init", - "default_value": "None", - "assigned_by": "POSITION_OR_NAME", - "is_public": false, - "docstring": { - "type": "", - "description": "" - }, - "type": {} } ], "results": [], @@ -43678,25 +43340,16 @@ "id": "sklearn/sklearn.cluster._kmeans/k_means/n_init", "name": "n_init", "qname": "sklearn.cluster._kmeans.k_means.n_init", - "default_value": "'warn'", + "default_value": "10", "assigned_by": "NAME_ONLY", "is_public": true, "docstring": { - "type": "'auto' or int, default=10", - "description": "Number of time the k-means algorithm will be run with different\ncentroid seeds. The final results will be the best output of\nn_init consecutive runs in terms of inertia.\n\nWhen `n_init='auto'`, the number of runs will be 10 if using\n`init='random'`, and 1 if using `init='kmeans++'`.\n\n.. versionadded:: 1.2\n Added 'auto' option for `n_init`.\n\n.. versionchanged:: 1.4\n Default value for `n_init` will change from 10 to `'auto'` in version 1.4." + "type": "int, default=10", + "description": "Number of time the k-means algorithm will be run with different\ncentroid seeds. The final results will be the best output of\n`n_init` consecutive runs in terms of inertia." 
}, "type": { - "kind": "UnionType", - "types": [ - { - "kind": "NamedType", - "name": "'auto'" - }, - { - "kind": "NamedType", - "name": "int" - } - ] + "kind": "NamedType", + "name": "int" } }, { @@ -43805,7 +43458,7 @@ }, "type": { "kind": "EnumType", - "values": ["full", "lloyd", "elkan", "auto"] + "values": ["auto", "lloyd", "elkan", "full"] } }, { @@ -43828,15 +43481,13 @@ "results": [], "is_public": true, "description": "Perform K-means clustering algorithm.\n\nRead more in the :ref:`User Guide `.", - "docstring": "Perform K-means clustering algorithm.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The observations to cluster. It must be noted that the data\n will be converted to C ordering, which will cause a memory copy\n if the given data is not C-contiguous.\n\n n_clusters : int\n The number of clusters to form as well as the number of\n centroids to generate.\n\n sample_weight : array-like of shape (n_samples,), default=None\n The weights for each observation in `X`. If `None`, all observations\n are assigned equal weight.\n\n init : {'k-means++', 'random'}, callable or array-like of shape (n_clusters, n_features), default='k-means++'\n Method for initialization:\n\n - `'k-means++'` : selects initial cluster centers for k-mean\n clustering in a smart way to speed up convergence. See section\n Notes in k_init for more details.\n - `'random'`: choose `n_clusters` observations (rows) at random from data\n for the initial centroids.\n - If an array is passed, it should be of shape `(n_clusters, n_features)`\n and gives the initial centers.\n - If a callable is passed, it should take arguments `X`, `n_clusters` and a\n random state and return an initialization.\n\n n_init : 'auto' or int, default=10\n Number of time the k-means algorithm will be run with different\n centroid seeds. The final results will be the best output of\n n_init consecutive runs in terms of inertia.\n\n When `n_init='auto'`, the number of runs will be 10 if using\n `init='random'`, and 1 if using `init='kmeans++'`.\n\n .. versionadded:: 1.2\n Added 'auto' option for `n_init`.\n\n .. versionchanged:: 1.4\n Default value for `n_init` will change from 10 to `'auto'` in version 1.4.\n\n max_iter : int, default=300\n Maximum number of iterations of the k-means algorithm to run.\n\n verbose : bool, default=False\n Verbosity mode.\n\n tol : float, default=1e-4\n Relative tolerance with regards to Frobenius norm of the difference\n in the cluster centers of two consecutive iterations to declare\n convergence.\n\n random_state : int, RandomState instance or None, default=None\n Determines random number generation for centroid initialization. Use\n an int to make the randomness deterministic.\n See :term:`Glossary `.\n\n copy_x : bool, default=True\n When pre-computing distances it is more numerically accurate to center\n the data first. If `copy_x` is True (default), then the original data is\n not modified. If False, the original data is modified, and put back\n before the function returns, but small numerical differences may be\n introduced by subtracting and then adding the data mean. Note that if\n the original data is not C-contiguous, a copy will be made even if\n `copy_x` is False. If the original data is sparse, but not in CSR format,\n a copy will be made even if `copy_x` is False.\n\n algorithm : {\"lloyd\", \"elkan\", \"auto\", \"full\"}, default=\"lloyd\"\n K-means algorithm to use. 
The classical EM-style algorithm is `\"lloyd\"`.\n The `\"elkan\"` variation can be more efficient on some datasets with\n well-defined clusters, by using the triangle inequality. However it's\n more memory intensive due to the allocation of an extra array of shape\n `(n_samples, n_clusters)`.\n\n `\"auto\"` and `\"full\"` are deprecated and they will be removed in\n Scikit-Learn 1.3. They are both aliases for `\"lloyd\"`.\n\n .. versionchanged:: 0.18\n Added Elkan algorithm\n\n .. versionchanged:: 1.1\n Renamed \"full\" to \"lloyd\", and deprecated \"auto\" and \"full\".\n Changed \"auto\" to use \"lloyd\" instead of \"elkan\".\n\n return_n_iter : bool, default=False\n Whether or not to return the number of iterations.\n\n Returns\n -------\n centroid : ndarray of shape (n_clusters, n_features)\n Centroids found at the last iteration of k-means.\n\n label : ndarray of shape (n_samples,)\n The `label[i]` is the code or index of the centroid the\n i'th observation is closest to.\n\n inertia : float\n The final value of the inertia criterion (sum of squared distances to\n the closest centroid for all observations in the training set).\n\n best_n_iter : int\n Number of iterations corresponding to the best results.\n Returned only if `return_n_iter` is set to True.\n " + "docstring": "Perform K-means clustering algorithm.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The observations to cluster. It must be noted that the data\n will be converted to C ordering, which will cause a memory copy\n if the given data is not C-contiguous.\n\n n_clusters : int\n The number of clusters to form as well as the number of\n centroids to generate.\n\n sample_weight : array-like of shape (n_samples,), default=None\n The weights for each observation in `X`. If `None`, all observations\n are assigned equal weight.\n\n init : {'k-means++', 'random'}, callable or array-like of shape (n_clusters, n_features), default='k-means++'\n Method for initialization:\n\n - `'k-means++'` : selects initial cluster centers for k-mean\n clustering in a smart way to speed up convergence. See section\n Notes in k_init for more details.\n - `'random'`: choose `n_clusters` observations (rows) at random from data\n for the initial centroids.\n - If an array is passed, it should be of shape `(n_clusters, n_features)`\n and gives the initial centers.\n - If a callable is passed, it should take arguments `X`, `n_clusters` and a\n random state and return an initialization.\n\n n_init : int, default=10\n Number of time the k-means algorithm will be run with different\n centroid seeds. The final results will be the best output of\n `n_init` consecutive runs in terms of inertia.\n\n max_iter : int, default=300\n Maximum number of iterations of the k-means algorithm to run.\n\n verbose : bool, default=False\n Verbosity mode.\n\n tol : float, default=1e-4\n Relative tolerance with regards to Frobenius norm of the difference\n in the cluster centers of two consecutive iterations to declare\n convergence.\n\n random_state : int, RandomState instance or None, default=None\n Determines random number generation for centroid initialization. Use\n an int to make the randomness deterministic.\n See :term:`Glossary `.\n\n copy_x : bool, default=True\n When pre-computing distances it is more numerically accurate to center\n the data first. If `copy_x` is True (default), then the original data is\n not modified. 
If False, the original data is modified, and put back\n before the function returns, but small numerical differences may be\n introduced by subtracting and then adding the data mean. Note that if\n the original data is not C-contiguous, a copy will be made even if\n `copy_x` is False. If the original data is sparse, but not in CSR format,\n a copy will be made even if `copy_x` is False.\n\n algorithm : {\"lloyd\", \"elkan\", \"auto\", \"full\"}, default=\"lloyd\"\n K-means algorithm to use. The classical EM-style algorithm is `\"lloyd\"`.\n The `\"elkan\"` variation can be more efficient on some datasets with\n well-defined clusters, by using the triangle inequality. However it's\n more memory intensive due to the allocation of an extra array of shape\n `(n_samples, n_clusters)`.\n\n `\"auto\"` and `\"full\"` are deprecated and they will be removed in\n Scikit-Learn 1.3. They are both aliases for `\"lloyd\"`.\n\n .. versionchanged:: 0.18\n Added Elkan algorithm\n\n .. versionchanged:: 1.1\n Renamed \"full\" to \"lloyd\", and deprecated \"auto\" and \"full\".\n Changed \"auto\" to use \"lloyd\" instead of \"elkan\".\n\n return_n_iter : bool, default=False\n Whether or not to return the number of iterations.\n\n Returns\n -------\n centroid : ndarray of shape (n_clusters, n_features)\n Centroids found at the last iteration of k-means.\n\n label : ndarray of shape (n_samples,)\n The `label[i]` is the code or index of the centroid the\n i'th observation is closest to.\n\n inertia : float\n The final value of the inertia criterion (sum of squared distances to\n the closest centroid for all observations in the training set).\n\n best_n_iter : int\n Number of iterations corresponding to the best results.\n Returned only if `return_n_iter` is set to True.\n " }, { "id": "sklearn/sklearn.cluster._kmeans/kmeans_plusplus", "name": "kmeans_plusplus", "qname": "sklearn.cluster._kmeans.kmeans_plusplus", - "decorators": [ - "validate_params({'X': ['array-like', 'sparse matrix'], 'n_clusters': [Interval(Integral, 1, None, closed='left')], 'x_squared_norms': ['array-like', None], 'random_state': ['random_state'], 'n_local_trials': [Interval(Integral, 1, None, closed='left'), None]})" - ], + "decorators": [], "parameters": [ { "id": "sklearn/sklearn.cluster._kmeans/kmeans_plusplus/X", @@ -43954,7 +43605,7 @@ "qname": "sklearn.cluster._mean_shift.MeanShift.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -43967,7 +43618,7 @@ "qname": "sklearn.cluster._mean_shift.MeanShift.__init__.bandwidth", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=None", "description": "Bandwidth used in the RBF kernel.\n\nIf not given, the bandwidth is estimated using\nsklearn.cluster.estimate_bandwidth; see the documentation for that\nfunction for hints on scalability (see also the Notes, below)." @@ -43983,7 +43634,7 @@ "qname": "sklearn.cluster._mean_shift.MeanShift.__init__.seeds", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features), default=None", "description": "Seeds used to initialize kernels. If not set,\nthe seeds are calculated by clustering.get_bin_seeds\nwith bandwidth as the grid size and default values for\nother parameters." 
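Similarly, a sketch of the mini-batch and functional k-means surfaces annotated earlier in this hunk series (MiniBatchKMeans with its integer `n_init=3` default, plus the `k_means` and `kmeans_plusplus` helpers); the data, the chunking, and the specific values are illustrative assumptions:

import numpy as np
from sklearn.cluster import MiniBatchKMeans, k_means, kmeans_plusplus

X = np.random.RandomState(0).rand(5000, 2)       # synthetic toy data (assumption)

mbk = MiniBatchKMeans(n_clusters=8, batch_size=1024, n_init=3,
                      max_no_improvement=10, reassignment_ratio=0.01, random_state=0)
for chunk in np.array_split(X, 5):               # stream the data in mini-batches
    mbk.partial_fit(chunk)                       # partial_fit(X, y=None, sample_weight=None)

centers, labels, inertia = k_means(X, n_clusters=8, n_init=10, random_state=0)
seeds, indices = kmeans_plusplus(X, n_clusters=8, random_state=0)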
@@ -43999,7 +43650,7 @@ "qname": "sklearn.cluster._mean_shift.MeanShift.__init__.bin_seeding", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "If true, initial kernel locations are not locations of all\npoints, but rather the location of the discretized version of\npoints, where points are binned onto a grid whose coarseness\ncorresponds to the bandwidth. Setting this option to True will speed\nup the algorithm because fewer seeds will be initialized.\nThe default value is False.\nIgnored if seeds argument is not None." @@ -44015,7 +43666,7 @@ "qname": "sklearn.cluster._mean_shift.MeanShift.__init__.min_bin_freq", "default_value": "1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=1", "description": "To speed up the algorithm, accept only those bins with at least\nmin_bin_freq points as seeds." @@ -44031,7 +43682,7 @@ "qname": "sklearn.cluster._mean_shift.MeanShift.__init__.cluster_all", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "If true, then all points are clustered, even those orphans that are\nnot within any kernel. Orphans are assigned to the nearest kernel.\nIf false, then orphans are given cluster label -1." @@ -44047,7 +43698,7 @@ "qname": "sklearn.cluster._mean_shift.MeanShift.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "The number of jobs to use for the computation. This works by computing\neach of the n_init runs in parallel.\n\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details." @@ -44063,7 +43714,7 @@ "qname": "sklearn.cluster._mean_shift.MeanShift.__init__.max_iter", "default_value": "300", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=300", "description": "Maximum number of iterations, per seed point before the clustering\noperation terminates (for that seed point), if has not converged yet.\n\n.. versionadded:: 0.22" @@ -44075,7 +43726,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -44091,7 +43742,7 @@ "qname": "sklearn.cluster._mean_shift.MeanShift.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -44104,7 +43755,7 @@ "qname": "sklearn.cluster._mean_shift.MeanShift.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Samples to cluster." @@ -44120,7 +43771,7 @@ "qname": "sklearn.cluster._mean_shift.MeanShift.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present for API consistency by convention." 
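A minimal MeanShift sketch matching the parameters annotated above; estimating the bandwidth explicitly mirrors what the estimator does when `bandwidth=None`, and the toy data is an assumption:

import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth

X = np.random.RandomState(0).rand(300, 2)        # synthetic toy data (assumption)

bw = estimate_bandwidth(X)                       # used internally when bandwidth=None
ms = MeanShift(bandwidth=bw, bin_seeding=True, min_bin_freq=1,
               cluster_all=True, max_iter=300)
ms.fit(X)                                        # fit(X, y=None)
labels = ms.labels_
nearest = ms.predict(X[:3])                      # closest cluster for new samples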
@@ -44132,7 +43783,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Perform clustering.", "docstring": "Perform clustering.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Samples to cluster.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n self : object\n Fitted instance.\n " }, @@ -44148,7 +43799,7 @@ "qname": "sklearn.cluster._mean_shift.MeanShift.predict.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -44161,7 +43812,7 @@ "qname": "sklearn.cluster._mean_shift.MeanShift.predict.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "New data to predict." @@ -44173,7 +43824,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict the closest cluster each sample in X belongs to.", "docstring": "Predict the closest cluster each sample in X belongs to.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n New data to predict.\n\n Returns\n -------\n labels : ndarray of shape (n_samples,)\n Index of the cluster each sample belongs to.\n " }, @@ -44563,7 +44214,7 @@ "qname": "sklearn.cluster._optics.OPTICS.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -44576,7 +44227,7 @@ "qname": "sklearn.cluster._optics.OPTICS.__init__.min_samples", "default_value": "5", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int > 1 or float between 0 and 1, default=5", "description": "The number of samples in a neighborhood for a point to be considered as\na core point. Also, up and down steep regions can't have more than\n``min_samples`` consecutive non-steep points. Expressed as an absolute\nnumber or a fraction of the number of samples (rounded to be at least\n2)." @@ -44601,7 +44252,7 @@ "qname": "sklearn.cluster._optics.OPTICS.__init__.max_eps", "default_value": "np.inf", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=np.inf", "description": "The maximum distance between two samples for one to be considered as\nin the neighborhood of the other. Default value of ``np.inf`` will\nidentify clusters across all scales; reducing ``max_eps`` will result\nin shorter run times." @@ -44617,10 +44268,10 @@ "qname": "sklearn.cluster._optics.OPTICS.__init__.metric", "default_value": "'minkowski'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str or callable, default='minkowski'", - "description": "Metric to use for distance computation. Any metric from scikit-learn\nor scipy.spatial.distance can be used.\n\nIf metric is a callable function, it is called on each\npair of instances (rows) and the resulting value recorded. The callable\nshould take two arrays as input and return one value indicating the\ndistance between them. This works for Scipy's metrics, but is less\nefficient than passing the metric name as a string. 
If metric is\n\"precomputed\", `X` is assumed to be a distance matrix and must be\nsquare.\n\nValid values for metric are:\n\n- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',\n 'manhattan']\n\n- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',\n 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',\n 'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',\n 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',\n 'yule']\n\nSparse matrices are only supported by scikit-learn metrics.\nSee the documentation for scipy.spatial.distance for details on these\nmetrics." + "description": "Metric to use for distance computation. Any metric from scikit-learn\nor scipy.spatial.distance can be used.\n\nIf metric is a callable function, it is called on each\npair of instances (rows) and the resulting value recorded. The callable\nshould take two arrays as input and return one value indicating the\ndistance between them. This works for Scipy's metrics, but is less\nefficient than passing the metric name as a string. If metric is\n\"precomputed\", `X` is assumed to be a distance matrix and must be\nsquare.\n\nValid values for metric are:\n\n- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',\n 'manhattan']\n\n- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',\n 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',\n 'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',\n 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',\n 'yule']\n\nSee the documentation for scipy.spatial.distance for details on these\nmetrics." }, "type": { "kind": "UnionType", @@ -44642,7 +44293,7 @@ "qname": "sklearn.cluster._optics.OPTICS.__init__.p", "default_value": "2", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=2", "description": "Parameter for the Minkowski metric from\n:class:`~sklearn.metrics.pairwise_distances`. When p = 1, this is\nequivalent to using manhattan_distance (l1), and euclidean_distance\n(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used." @@ -44658,7 +44309,7 @@ "qname": "sklearn.cluster._optics.OPTICS.__init__.metric_params", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "dict, default=None", "description": "Additional keyword arguments for the metric function." @@ -44674,7 +44325,7 @@ "qname": "sklearn.cluster._optics.OPTICS.__init__.cluster_method", "default_value": "'xi'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str, default='xi'", "description": "The extraction method used to extract clusters using the calculated\nreachability and ordering. Possible values are \"xi\" and \"dbscan\"." @@ -44690,7 +44341,7 @@ "qname": "sklearn.cluster._optics.OPTICS.__init__.eps", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=None", "description": "The maximum distance between two samples for one to be considered as\nin the neighborhood of the other. By default it assumes the same value\nas ``max_eps``.\nUsed only when ``cluster_method='dbscan'``." 
@@ -44706,7 +44357,7 @@ "qname": "sklearn.cluster._optics.OPTICS.__init__.xi", "default_value": "0.05", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float between 0 and 1, default=0.05", "description": "Determines the minimum steepness on the reachability plot that\nconstitutes a cluster boundary. For example, an upwards point in the\nreachability plot is defined by the ratio from one point to its\nsuccessor being at most 1-xi.\nUsed only when ``cluster_method='xi'``." @@ -44722,7 +44373,7 @@ "qname": "sklearn.cluster._optics.OPTICS.__init__.predecessor_correction", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Correct clusters according to the predecessors calculated by OPTICS\n[2]_. This parameter has minimal effect on most datasets.\nUsed only when ``cluster_method='xi'``." @@ -44738,7 +44389,7 @@ "qname": "sklearn.cluster._optics.OPTICS.__init__.min_cluster_size", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int > 1 or float between 0 and 1, default=None", "description": "Minimum number of samples in an OPTICS cluster, expressed as an\nabsolute number or a fraction of the number of samples (rounded to be\nat least 2). If ``None``, the value of ``min_samples`` is used instead.\nUsed only when ``cluster_method='xi'``." @@ -44763,14 +44414,14 @@ "qname": "sklearn.cluster._optics.OPTICS.__init__.algorithm", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'", "description": "Algorithm used to compute the nearest neighbors:\n\n- 'ball_tree' will use :class:`BallTree`.\n- 'kd_tree' will use :class:`KDTree`.\n- 'brute' will use a brute-force search.\n- 'auto' (default) will attempt to decide the most appropriate\n algorithm based on the values passed to :meth:`fit` method.\n\nNote: fitting on sparse input will override the setting of\nthis parameter, using brute force." }, "type": { "kind": "EnumType", - "values": ["auto", "kd_tree", "ball_tree", "brute"] + "values": ["auto", "kd_tree", "brute", "ball_tree"] } }, { @@ -44779,7 +44430,7 @@ "qname": "sklearn.cluster._optics.OPTICS.__init__.leaf_size", "default_value": "30", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=30", "description": "Leaf size passed to :class:`BallTree` or :class:`KDTree`. This can\naffect the speed of the construction and query, as well as the memory\nrequired to store the tree. The optimal value depends on the\nnature of the problem." @@ -44795,7 +44446,7 @@ "qname": "sklearn.cluster._optics.OPTICS.__init__.memory", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str or object with the joblib.Memory interface, default=None", "description": "Used to cache the output of the computation of the tree.\nBy default, no caching is done. If a string is given, it is the\npath to the caching directory." 
@@ -44820,7 +44471,7 @@ "qname": "sklearn.cluster._optics.OPTICS.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "The number of parallel jobs to run for neighbors search.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details." @@ -44832,7 +44483,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -44848,7 +44499,7 @@ "qname": "sklearn.cluster._optics.OPTICS.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -44861,21 +44512,17 @@ "qname": "sklearn.cluster._optics.OPTICS.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { - "type": "{ndarray, sparse matrix} of shape (n_samples, n_features), or (n_samples, n_samples) if metric=\u2019precomputed\u2019", - "description": "A feature array, or array of distances between samples if\nmetric='precomputed'. If a sparse matrix is provided, it will be\nconverted into CSR format." + "type": "ndarray of shape (n_samples, n_features), or (n_samples, n_samples) if metric=\u2019precomputed\u2019", + "description": "A feature array, or array of distances between samples if\nmetric='precomputed'." }, "type": { "kind": "UnionType", "types": [ - { - "kind": "EnumType", - "values": [] - }, { "kind": "NamedType", - "name": "of shape (n_samples, n_features)" + "name": "ndarray of shape (n_samples, n_features)" }, { "kind": "NamedType", @@ -44890,7 +44537,7 @@ "qname": "sklearn.cluster._optics.OPTICS.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present for API consistency by convention." @@ -44902,9 +44549,9 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Perform OPTICS clustering.\n\nExtracts an ordered list of points and reachability distances, and\nperforms initial clustering using ``max_eps`` distance specified at\nOPTICS object instantiation.", - "docstring": "Perform OPTICS clustering.\n\n Extracts an ordered list of points and reachability distances, and\n performs initial clustering using ``max_eps`` distance specified at\n OPTICS object instantiation.\n\n Parameters\n ----------\n X : {ndarray, sparse matrix} of shape (n_samples, n_features), or (n_samples, n_samples) if metric=\u2019precomputed\u2019\n A feature array, or array of distances between samples if\n metric='precomputed'. 
If a sparse matrix is provided, it will be\n converted into CSR format.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n self : object\n Returns a fitted instance of self.\n " + "docstring": "Perform OPTICS clustering.\n\n Extracts an ordered list of points and reachability distances, and\n performs initial clustering using ``max_eps`` distance specified at\n OPTICS object instantiation.\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features), or (n_samples, n_samples) if metric=\u2019precomputed\u2019\n A feature array, or array of distances between samples if\n metric='precomputed'.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n self : object\n Returns a fitted instance of self.\n " }, { "id": "sklearn/sklearn.cluster._optics/_compute_core_distances_", @@ -45954,7 +45601,7 @@ }, "type": { "kind": "EnumType", - "values": ["auto", "kd_tree", "ball_tree", "brute"] + "values": ["auto", "kd_tree", "brute", "ball_tree"] } }, { @@ -46007,7 +45654,7 @@ "qname": "sklearn.cluster._spectral.SpectralClustering.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -46020,7 +45667,7 @@ "qname": "sklearn.cluster._spectral.SpectralClustering.__init__.n_clusters", "default_value": "8", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=8", "description": "The dimension of the projection subspace." @@ -46036,14 +45683,14 @@ "qname": "sklearn.cluster._spectral.SpectralClustering.__init__.eigen_solver", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'arpack', 'lobpcg', 'amg'}, default=None", "description": "The eigenvalue decomposition strategy to use. AMG requires pyamg\nto be installed. It can be faster on very large, sparse problems,\nbut may also lead to instabilities. If None, then ``'arpack'`` is\nused. See [4]_ for more details regarding `'lobpcg'`." }, "type": { "kind": "EnumType", - "values": ["arpack", "lobpcg", "amg"] + "values": ["arpack", "amg", "lobpcg"] } }, { @@ -46052,7 +45699,7 @@ "qname": "sklearn.cluster._spectral.SpectralClustering.__init__.n_components", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=n_clusters", "description": "Number of eigenvectors to use for the spectral embedding." @@ -46068,7 +45715,7 @@ "qname": "sklearn.cluster._spectral.SpectralClustering.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance, default=None", "description": "A pseudo random number generator used for the initialization\nof the lobpcg eigenvectors decomposition when `eigen_solver ==\n'amg'`, and for the K-Means initialization. Use an int to make\nthe results deterministic across calls (See\n:term:`Glossary `).\n\n.. note::\n When using `eigen_solver == 'amg'`,\n it is necessary to also fix the global numpy seed with\n `np.random.seed(int)` to get deterministic results. See\n https://github.com/pyamg/pyamg/issues/139 for further\n information." 
@@ -46093,7 +45740,7 @@ "qname": "sklearn.cluster._spectral.SpectralClustering.__init__.n_init", "default_value": "10", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=10", "description": "Number of time the k-means algorithm will be run with different\ncentroid seeds. The final results will be the best output of n_init\nconsecutive runs in terms of inertia. Only used if\n``assign_labels='kmeans'``." @@ -46109,7 +45756,7 @@ "qname": "sklearn.cluster._spectral.SpectralClustering.__init__.gamma", "default_value": "1.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1.0", "description": "Kernel coefficient for rbf, poly, sigmoid, laplacian and chi2 kernels.\nIgnored for ``affinity='nearest_neighbors'``." @@ -46125,7 +45772,7 @@ "qname": "sklearn.cluster._spectral.SpectralClustering.__init__.affinity", "default_value": "'rbf'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str or callable, default='rbf'", "description": "How to construct the affinity matrix.\n - 'nearest_neighbors': construct the affinity matrix by computing a\n graph of nearest neighbors.\n - 'rbf': construct the affinity matrix using a radial basis function\n (RBF) kernel.\n - 'precomputed': interpret ``X`` as a precomputed affinity matrix,\n where larger values indicate greater similarity between instances.\n - 'precomputed_nearest_neighbors': interpret ``X`` as a sparse graph\n of precomputed distances, and construct a binary affinity matrix\n from the ``n_neighbors`` nearest neighbors of each instance.\n - one of the kernels supported by\n :func:`~sklearn.metrics.pairwise_kernels`.\n\nOnly kernels that produce similarity scores (non-negative values that\nincrease with similarity) should be used. This property is not checked\nby the clustering algorithm." @@ -46150,7 +45797,7 @@ "qname": "sklearn.cluster._spectral.SpectralClustering.__init__.n_neighbors", "default_value": "10", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=10", "description": "Number of neighbors to use when constructing the affinity matrix using\nthe nearest neighbors method. Ignored for ``affinity='rbf'``." @@ -46166,7 +45813,7 @@ "qname": "sklearn.cluster._spectral.SpectralClustering.__init__.eigen_tol", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.0", "description": "Stopping criterion for eigendecomposition of the Laplacian matrix\nwhen ``eigen_solver='arpack'``." @@ -46182,14 +45829,14 @@ "qname": "sklearn.cluster._spectral.SpectralClustering.__init__.assign_labels", "default_value": "'kmeans'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'kmeans', 'discretize', 'cluster_qr'}, default='kmeans'", "description": "The strategy for assigning labels in the embedding space. There are two\nways to assign labels after the Laplacian embedding. k-means is a\npopular choice, but it can be sensitive to initialization.\nDiscretization is another approach which is less sensitive to random\ninitialization [3]_.\nThe cluster_qr method [5]_ directly extract clusters from eigenvectors\nin spectral clustering. 
In contrast to k-means and discretization, cluster_qr\nhas no tuning parameters and runs no iterations, yet may outperform\nk-means and discretization in terms of both quality and speed.\n\n.. versionchanged:: 1.1\n Added new labeling method 'cluster_qr'." }, "type": { "kind": "EnumType", - "values": ["kmeans", "discretize", "cluster_qr"] + "values": ["discretize", "cluster_qr", "kmeans"] } }, { @@ -46198,7 +45845,7 @@ "qname": "sklearn.cluster._spectral.SpectralClustering.__init__.degree", "default_value": "3", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=3", "description": "Degree of the polynomial kernel. Ignored by other kernels." @@ -46214,7 +45861,7 @@ "qname": "sklearn.cluster._spectral.SpectralClustering.__init__.coef0", "default_value": "1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1", "description": "Zero coefficient for polynomial and sigmoid kernels.\nIgnored by other kernels." @@ -46230,7 +45877,7 @@ "qname": "sklearn.cluster._spectral.SpectralClustering.__init__.kernel_params", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "dict of str to any, default=None", "description": "Parameters (keyword arguments) and values for kernel passed as\ncallable object. Ignored by other kernels." @@ -46246,7 +45893,7 @@ "qname": "sklearn.cluster._spectral.SpectralClustering.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "The number of parallel jobs to run when `affinity='nearest_neighbors'`\nor `affinity='precomputed_nearest_neighbors'`. The neighbors search\nwill be done in parallel.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details." @@ -46262,7 +45909,7 @@ "qname": "sklearn.cluster._spectral.SpectralClustering.__init__.verbose", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Verbosity mode.\n\n.. versionadded:: 0.24" @@ -46274,7 +45921,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -46315,7 +45962,7 @@ "qname": "sklearn.cluster._spectral.SpectralClustering.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -46328,7 +45975,7 @@ "qname": "sklearn.cluster._spectral.SpectralClustering.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_samples)", "description": "Training instances to cluster, similarities / affinities between\ninstances if ``affinity='precomputed'``, or distances between\ninstances if ``affinity='precomputed_nearest_neighbors``. If a\nsparse matrix is provided in a format other than ``csr_matrix``,\n``csc_matrix``, or ``coo_matrix``, it will be converted into a\nsparse ``csr_matrix``." 
@@ -46353,7 +46000,7 @@ "qname": "sklearn.cluster._spectral.SpectralClustering.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present here for API consistency by convention." @@ -46365,7 +46012,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Perform spectral clustering from features, or affinity matrix.", "docstring": "Perform spectral clustering from features, or affinity matrix.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_samples)\n Training instances to cluster, similarities / affinities between\n instances if ``affinity='precomputed'``, or distances between\n instances if ``affinity='precomputed_nearest_neighbors``. If a\n sparse matrix is provided in a format other than ``csr_matrix``,\n ``csc_matrix``, or ``coo_matrix``, it will be converted into a\n sparse ``csr_matrix``.\n\n y : Ignored\n Not used, present here for API consistency by convention.\n\n Returns\n -------\n self : object\n A fitted instance of the estimator.\n " }, @@ -46381,7 +46028,7 @@ "qname": "sklearn.cluster._spectral.SpectralClustering.fit_predict.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -46394,7 +46041,7 @@ "qname": "sklearn.cluster._spectral.SpectralClustering.fit_predict.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_samples)", "description": "Training instances to cluster, similarities / affinities between\ninstances if ``affinity='precomputed'``, or distances between\ninstances if ``affinity='precomputed_nearest_neighbors``. If a\nsparse matrix is provided in a format other than ``csr_matrix``,\n``csc_matrix``, or ``coo_matrix``, it will be converted into a\nsparse ``csr_matrix``." @@ -46419,7 +46066,7 @@ "qname": "sklearn.cluster._spectral.SpectralClustering.fit_predict.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present here for API consistency by convention." @@ -46431,7 +46078,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Perform spectral clustering on `X` and return cluster labels.", "docstring": "Perform spectral clustering on `X` and return cluster labels.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_samples)\n Training instances to cluster, similarities / affinities between\n instances if ``affinity='precomputed'``, or distances between\n instances if ``affinity='precomputed_nearest_neighbors``. 
If a\n sparse matrix is provided in a format other than ``csr_matrix``,\n ``csc_matrix``, or ``coo_matrix``, it will be converted into a\n sparse ``csr_matrix``.\n\n y : Ignored\n Not used, present here for API consistency by convention.\n\n Returns\n -------\n labels : ndarray of shape (n_samples,)\n Cluster labels.\n " }, @@ -46649,7 +46296,7 @@ }, "type": { "kind": "EnumType", - "values": ["arpack", "lobpcg", "amg"] + "values": ["arpack", "amg", "lobpcg"] } }, { @@ -46722,7 +46369,7 @@ }, "type": { "kind": "EnumType", - "values": ["kmeans", "discretize", "cluster_qr"] + "values": ["discretize", "cluster_qr", "kmeans"] } }, { @@ -46797,7 +46444,7 @@ "qname": "sklearn.compose._column_transformer.ColumnTransformer.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -46810,7 +46457,7 @@ "qname": "sklearn.compose._column_transformer.ColumnTransformer.__init__.transformers", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "list of tuples", "description": "List of (name, transformer, columns) tuples specifying the\ntransformer objects to be applied to subsets of the data.\n\nname : str\n Like in Pipeline and FeatureUnion, this allows the transformer and\n its parameters to be set using ``set_params`` and searched in grid\n search.\ntransformer : {'drop', 'passthrough'} or estimator\n Estimator must support :term:`fit` and :term:`transform`.\n Special-cased strings 'drop' and 'passthrough' are accepted as\n well, to indicate to drop the columns or to pass them through\n untransformed, respectively.\ncolumns : str, array-like of str, int, array-like of int, array-like of bool, slice or callable\n Indexes the data on its second axis. Integers are interpreted as\n positional columns, while strings can reference DataFrame columns\n by name. A scalar string or int should be used where\n ``transformer`` expects X to be a 1d array-like (vector),\n otherwise a 2d array will be passed to the transformer.\n A callable is passed the input data `X` and can return any of the\n above. To select multiple columns by name or dtype, you can use\n :obj:`make_column_selector`." @@ -46826,7 +46473,7 @@ "qname": "sklearn.compose._column_transformer.ColumnTransformer.__init__.remainder", "default_value": "'drop'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'drop', 'passthrough'} or estimator, default='drop'", "description": "By default, only the specified columns in `transformers` are\ntransformed and combined in the output, and the non-specified\ncolumns are dropped. (default of ``'drop'``).\nBy specifying ``remainder='passthrough'``, all remaining columns that\nwere not specified in `transformers` will be automatically passed\nthrough. This subset of columns is concatenated with the output of\nthe transformers.\nBy setting ``remainder`` to be an estimator, the remaining\nnon-specified columns will use the ``remainder`` estimator. The\nestimator must support :term:`fit` and :term:`transform`.\nNote that using this feature requires that the DataFrame columns\ninput at :term:`fit` and :term:`transform` have identical order." 
@@ -46836,7 +46483,7 @@ "types": [ { "kind": "EnumType", - "values": ["drop", "passthrough"] + "values": ["passthrough", "drop"] }, { "kind": "NamedType", @@ -46851,7 +46498,7 @@ "qname": "sklearn.compose._column_transformer.ColumnTransformer.__init__.sparse_threshold", "default_value": "0.3", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.3", "description": "If the output of the different transformers contains sparse matrices,\nthese will be stacked as a sparse matrix if the overall density is\nlower than this value. Use ``sparse_threshold=0`` to always return\ndense. When the transformed output consists of all dense data, the\nstacked result will be dense, and this keyword will be ignored." @@ -46867,7 +46514,7 @@ "qname": "sklearn.compose._column_transformer.ColumnTransformer.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Number of jobs to run in parallel.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details." @@ -46883,7 +46530,7 @@ "qname": "sklearn.compose._column_transformer.ColumnTransformer.__init__.transformer_weights", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "dict, default=None", "description": "Multiplicative weights for features per transformer. The output of the\ntransformer is multiplied by these weights. Keys are transformer names,\nvalues the weights." @@ -46899,7 +46546,7 @@ "qname": "sklearn.compose._column_transformer.ColumnTransformer.__init__.verbose", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "If True, the time elapsed while fitting each transformer will be\nprinted as it is completed." @@ -46915,7 +46562,7 @@ "qname": "sklearn.compose._column_transformer.ColumnTransformer.__init__.verbose_feature_names_out", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "If True, :meth:`get_feature_names_out` will prefix all feature names\nwith the name of the transformer that generated that feature.\nIf False, :meth:`get_feature_names_out` will not prefix any feature\nnames and will error if feature names are not unique.\n\n.. versionadded:: 1.0" @@ -46927,7 +46574,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -47591,7 +47238,7 @@ "qname": "sklearn.compose._column_transformer.ColumnTransformer.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -47604,7 +47251,7 @@ "qname": "sklearn.compose._column_transformer.ColumnTransformer.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, dataframe} of shape (n_samples, n_features)", "description": "Input data, of which specified subsets are used to fit the\ntransformers." 
@@ -47629,7 +47276,7 @@ "qname": "sklearn.compose._column_transformer.ColumnTransformer.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,...), default=None", "description": "Targets for supervised learning." @@ -47641,7 +47288,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit all transformers using X.", "docstring": "Fit all transformers using X.\n\n Parameters\n ----------\n X : {array-like, dataframe} of shape (n_samples, n_features)\n Input data, of which specified subsets are used to fit the\n transformers.\n\n y : array-like of shape (n_samples,...), default=None\n Targets for supervised learning.\n\n Returns\n -------\n self : ColumnTransformer\n This estimator.\n " }, @@ -47657,7 +47304,7 @@ "qname": "sklearn.compose._column_transformer.ColumnTransformer.fit_transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -47670,7 +47317,7 @@ "qname": "sklearn.compose._column_transformer.ColumnTransformer.fit_transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, dataframe} of shape (n_samples, n_features)", "description": "Input data, of which specified subsets are used to fit the\ntransformers." @@ -47695,7 +47342,7 @@ "qname": "sklearn.compose._column_transformer.ColumnTransformer.fit_transform.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,), default=None", "description": "Targets for supervised learning." @@ -47707,7 +47354,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit all transformers, transform the data and concatenate results.", "docstring": "Fit all transformers, transform the data and concatenate results.\n\n Parameters\n ----------\n X : {array-like, dataframe} of shape (n_samples, n_features)\n Input data, of which specified subsets are used to fit the\n transformers.\n\n y : array-like of shape (n_samples,), default=None\n Targets for supervised learning.\n\n Returns\n -------\n X_t : {array-like, sparse matrix} of shape (n_samples, sum_n_components)\n Horizontally stacked results of transformers. sum_n_components is the\n sum of n_components (output dimension) over transformers. 
If\n any result is a sparse matrix, everything will be converted to\n sparse matrices.\n " }, @@ -47725,7 +47372,7 @@ "qname": "sklearn.compose._column_transformer.ColumnTransformer.get_feature_names.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -47734,7 +47381,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Get feature names from all transformers.", "docstring": "Get feature names from all transformers.\n\n Returns\n -------\n feature_names : list of strings\n Names of the features produced by transform.\n " }, @@ -47750,7 +47397,7 @@ "qname": "sklearn.compose._column_transformer.ColumnTransformer.get_feature_names_out.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -47763,7 +47410,7 @@ "qname": "sklearn.compose._column_transformer.ColumnTransformer.get_feature_names_out.input_features", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of str or None, default=None", "description": "Input features.\n\n- If `input_features` is `None`, then `feature_names_in_` is\n used as feature names in. If `feature_names_in_` is not defined,\n then the following input feature names are generated:\n `[\"x0\", \"x1\", ..., \"x(n_features_in_ - 1)\"]`.\n- If `input_features` is an array-like, then `input_features` must\n match `feature_names_in_` if `feature_names_in_` is defined." @@ -47784,7 +47431,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Get output feature names for transformation.", "docstring": "Get output feature names for transformation.\n\n Parameters\n ----------\n input_features : array-like of str or None, default=None\n Input features.\n\n - If `input_features` is `None`, then `feature_names_in_` is\n used as feature names in. If `feature_names_in_` is not defined,\n then the following input feature names are generated:\n `[\"x0\", \"x1\", ..., \"x(n_features_in_ - 1)\"]`.\n - If `input_features` is an array-like, then `input_features` must\n match `feature_names_in_` if `feature_names_in_` is defined.\n\n Returns\n -------\n feature_names_out : ndarray of str objects\n Transformed feature names.\n " }, @@ -47800,7 +47447,7 @@ "qname": "sklearn.compose._column_transformer.ColumnTransformer.get_params.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -47813,7 +47460,7 @@ "qname": "sklearn.compose._column_transformer.ColumnTransformer.get_params.deep", "default_value": "True", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "If True, will return the parameters for this estimator and\ncontained subobjects that are estimators." 
@@ -47825,7 +47472,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Get parameters for this estimator.\n\nReturns the parameters given in the constructor as well as the\nestimators contained within the `transformers` of the\n`ColumnTransformer`.", "docstring": "Get parameters for this estimator.\n\n Returns the parameters given in the constructor as well as the\n estimators contained within the `transformers` of the\n `ColumnTransformer`.\n\n Parameters\n ----------\n deep : bool, default=True\n If True, will return the parameters for this estimator and\n contained subobjects that are estimators.\n\n Returns\n -------\n params : dict\n Parameter names mapped to their values.\n " }, @@ -47841,7 +47488,7 @@ "qname": "sklearn.compose._column_transformer.ColumnTransformer.named_transformers_.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -47850,7 +47497,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Access the fitted transformer by name.\n\nRead-only attribute to access any transformer by given name.\nKeys are transformer names and values are the fitted transformer\nobjects.", "docstring": "Access the fitted transformer by name.\n\n Read-only attribute to access any transformer by given name.\n Keys are transformer names and values are the fitted transformer\n objects.\n " }, @@ -47866,7 +47513,7 @@ "qname": "sklearn.compose._column_transformer.ColumnTransformer.set_params.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -47875,7 +47522,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Set the parameters of this estimator.\n\nValid parameter keys can be listed with ``get_params()``. Note that you\ncan directly set the parameters of the estimators contained in\n`transformers` of `ColumnTransformer`.", "docstring": "Set the parameters of this estimator.\n\n Valid parameter keys can be listed with ``get_params()``. Note that you\n can directly set the parameters of the estimators contained in\n `transformers` of `ColumnTransformer`.\n\n Parameters\n ----------\n **kwargs : dict\n Estimator parameters.\n\n Returns\n -------\n self : ColumnTransformer\n This estimator.\n " }, @@ -47891,7 +47538,7 @@ "qname": "sklearn.compose._column_transformer.ColumnTransformer.transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -47904,7 +47551,7 @@ "qname": "sklearn.compose._column_transformer.ColumnTransformer.transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, dataframe} of shape (n_samples, n_features)", "description": "The data to be transformed by subset." @@ -47925,7 +47572,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Transform X separately by each transformer, concatenate results.", "docstring": "Transform X separately by each transformer, concatenate results.\n\n Parameters\n ----------\n X : {array-like, dataframe} of shape (n_samples, n_features)\n The data to be transformed by subset.\n\n Returns\n -------\n X_t : {array-like, sparse matrix} of shape (n_samples, sum_n_components)\n Horizontally stacked results of transformers. 
sum_n_components is the\n sum of n_components (output dimension) over transformers. If\n any result is a sparse matrix, everything will be converted to\n sparse matrices.\n " }, @@ -48016,7 +47663,7 @@ "qname": "sklearn.compose._column_transformer.make_column_selector.__call__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -48029,7 +47676,7 @@ "qname": "sklearn.compose._column_transformer.make_column_selector.__call__.df", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "dataframe of shape (n_features, n_samples)", "description": "DataFrame to select columns from." @@ -48041,7 +47688,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Callable for column selection to be used by a\n:class:`ColumnTransformer`.", "docstring": "Callable for column selection to be used by a\n :class:`ColumnTransformer`.\n\n Parameters\n ----------\n df : dataframe of shape (n_features, n_samples)\n DataFrame to select columns from.\n " }, @@ -48057,7 +47704,7 @@ "qname": "sklearn.compose._column_transformer.make_column_selector.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -48070,7 +47717,7 @@ "qname": "sklearn.compose._column_transformer.make_column_selector.__init__.pattern", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "str, default=None", "description": "Name of columns containing this regex pattern will be included. If\nNone, column selection will not be selected based on pattern." @@ -48086,7 +47733,7 @@ "qname": "sklearn.compose._column_transformer.make_column_selector.__init__.dtype_include", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "column dtype or list of column dtypes, default=None", "description": "A selection of dtypes to include. For more details, see\n:meth:`pandas.DataFrame.select_dtypes`." @@ -48111,7 +47758,7 @@ "qname": "sklearn.compose._column_transformer.make_column_selector.__init__.dtype_exclude", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "column dtype or list of column dtypes, default=None", "description": "A selection of dtypes to exclude. For more details, see\n:meth:`pandas.DataFrame.select_dtypes`." @@ -48132,7 +47779,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -48158,7 +47805,7 @@ "types": [ { "kind": "EnumType", - "values": ["drop", "passthrough"] + "values": ["passthrough", "drop"] }, { "kind": "NamedType", @@ -48249,7 +47896,7 @@ "qname": "sklearn.compose._target.TransformedTargetRegressor.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -48262,7 +47909,7 @@ "qname": "sklearn.compose._target.TransformedTargetRegressor.__init__.regressor", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "object, default=None", "description": "Regressor object such as derived from\n:class:`~sklearn.base.RegressorMixin`. This regressor will\nautomatically be cloned each time prior to fitting. 
If `regressor is\nNone`, :class:`~sklearn.linear_model.LinearRegression` is created and used." @@ -48278,7 +47925,7 @@ "qname": "sklearn.compose._target.TransformedTargetRegressor.__init__.transformer", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "object, default=None", "description": "Estimator object such as derived from\n:class:`~sklearn.base.TransformerMixin`. Cannot be set at the same time\nas `func` and `inverse_func`. If `transformer is None` as well as\n`func` and `inverse_func`, the transformer will be an identity\ntransformer. Note that the transformer will be cloned during fitting.\nAlso, the transformer is restricting `y` to be a numpy array." @@ -48294,7 +47941,7 @@ "qname": "sklearn.compose._target.TransformedTargetRegressor.__init__.func", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "function, default=None", "description": "Function to apply to `y` before passing to :meth:`fit`. Cannot be set\nat the same time as `transformer`. The function needs to return a\n2-dimensional array. If `func is None`, the function used will be the\nidentity function." @@ -48310,7 +47957,7 @@ "qname": "sklearn.compose._target.TransformedTargetRegressor.__init__.inverse_func", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "function, default=None", "description": "Function to apply to the prediction of the regressor. Cannot be set at\nthe same time as `transformer`. The function needs to return a\n2-dimensional array. The inverse function is used to return\npredictions to the same space of the original training labels." @@ -48326,7 +47973,7 @@ "qname": "sklearn.compose._target.TransformedTargetRegressor.__init__.check_inverse", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether to check that `transform` followed by `inverse_transform`\nor `func` followed by `inverse_func` leads to the original targets." @@ -48338,7 +47985,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -48417,7 +48064,7 @@ "qname": "sklearn.compose._target.TransformedTargetRegressor.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -48430,7 +48077,7 @@ "qname": "sklearn.compose._target.TransformedTargetRegressor.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Training vector, where `n_samples` is the number of samples and\n`n_features` is the number of features." @@ -48455,7 +48102,7 @@ "qname": "sklearn.compose._target.TransformedTargetRegressor.fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "Target values." 
@@ -48467,7 +48114,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the model according to the given training data.", "docstring": "Fit the model according to the given training data.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training vector, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n y : array-like of shape (n_samples,)\n Target values.\n\n **fit_params : dict\n Parameters passed to the `fit` method of the underlying\n regressor.\n\n Returns\n -------\n self : object\n Fitted estimator.\n " }, @@ -48483,7 +48130,7 @@ "qname": "sklearn.compose._target.TransformedTargetRegressor.n_features_in_.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -48492,7 +48139,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Number of features seen during :term:`fit`.", "docstring": "Number of features seen during :term:`fit`." }, @@ -48508,7 +48155,7 @@ "qname": "sklearn.compose._target.TransformedTargetRegressor.predict.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -48521,7 +48168,7 @@ "qname": "sklearn.compose._target.TransformedTargetRegressor.predict.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Samples." @@ -48542,7 +48189,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict using the base regressor, applying inverse.\n\nThe regressor is used to predict and the `inverse_func` or\n`inverse_transform` is applied before returning the prediction.", "docstring": "Predict using the base regressor, applying inverse.\n\n The regressor is used to predict and the `inverse_func` or\n `inverse_transform` is applied before returning the prediction.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Samples.\n\n **predict_params : dict of str -> object\n Parameters passed to the `predict` method of the underlying\n regressor.\n\n Returns\n -------\n y_hat : ndarray of shape (n_samples,)\n Predicted values.\n " }, @@ -48716,7 +48363,7 @@ "qname": "sklearn.covariance._elliptic_envelope.EllipticEnvelope.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -48729,7 +48376,7 @@ "qname": "sklearn.covariance._elliptic_envelope.EllipticEnvelope.__init__.store_precision", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Specify if the estimated precision is stored." 
@@ -48745,7 +48392,7 @@ "qname": "sklearn.covariance._elliptic_envelope.EllipticEnvelope.__init__.assume_centered", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "If True, the support of robust location and covariance estimates\nis computed, and a covariance estimate is recomputed from it,\nwithout centering the data.\nUseful to work with data whose mean is significantly equal to\nzero but is not exactly zero.\nIf False, the robust location and covariance are directly computed\nwith the FastMCD algorithm without additional treatment." @@ -48761,7 +48408,7 @@ "qname": "sklearn.covariance._elliptic_envelope.EllipticEnvelope.__init__.support_fraction", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=None", "description": "The proportion of points to be included in the support of the raw\nMCD estimate. If None, the minimum value of support_fraction will\nbe used within the algorithm: `[n_sample + n_features + 1] / 2`.\nRange is (0, 1)." @@ -48777,7 +48424,7 @@ "qname": "sklearn.covariance._elliptic_envelope.EllipticEnvelope.__init__.contamination", "default_value": "0.1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.1", "description": "The amount of contamination of the data set, i.e. the proportion\nof outliers in the data set. Range is (0, 0.5]." @@ -48793,7 +48440,7 @@ "qname": "sklearn.covariance._elliptic_envelope.EllipticEnvelope.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Determines the pseudo random number generator for shuffling\nthe data. Pass an int for reproducible results across multiple function\ncalls. See :term:`Glossary `." @@ -48818,7 +48465,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -48834,7 +48481,7 @@ "qname": "sklearn.covariance._elliptic_envelope.EllipticEnvelope.decision_function.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -48847,7 +48494,7 @@ "qname": "sklearn.covariance._elliptic_envelope.EllipticEnvelope.decision_function.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "The data matrix." 
@@ -48859,7 +48506,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Compute the decision function of the given observations.", "docstring": "Compute the decision function of the given observations.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The data matrix.\n\n Returns\n -------\n decision : ndarray of shape (n_samples,)\n Decision function of the samples.\n It is equal to the shifted Mahalanobis distances.\n The threshold for being an outlier is 0, which ensures a\n compatibility with other outlier detection algorithms.\n " }, @@ -48875,7 +48522,7 @@ "qname": "sklearn.covariance._elliptic_envelope.EllipticEnvelope.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -48888,7 +48535,7 @@ "qname": "sklearn.covariance._elliptic_envelope.EllipticEnvelope.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Training data." @@ -48904,7 +48551,7 @@ "qname": "sklearn.covariance._elliptic_envelope.EllipticEnvelope.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present for API consistency by convention." @@ -48916,7 +48563,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the EllipticEnvelope model.", "docstring": "Fit the EllipticEnvelope model.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n self : object\n Returns the instance itself.\n " }, @@ -48932,7 +48579,7 @@ "qname": "sklearn.covariance._elliptic_envelope.EllipticEnvelope.predict.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -48945,7 +48592,7 @@ "qname": "sklearn.covariance._elliptic_envelope.EllipticEnvelope.predict.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "The data matrix." @@ -48957,7 +48604,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict labels (1 inlier, -1 outlier) of X according to fitted model.", "docstring": "\n Predict labels (1 inlier, -1 outlier) of X according to fitted model.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The data matrix.\n\n Returns\n -------\n is_inlier : ndarray of shape (n_samples,)\n Returns -1 for anomalies/outliers and +1 for inliers.\n " }, @@ -48973,7 +48620,7 @@ "qname": "sklearn.covariance._elliptic_envelope.EllipticEnvelope.score.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -48986,7 +48633,7 @@ "qname": "sklearn.covariance._elliptic_envelope.EllipticEnvelope.score.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Test samples." 
@@ -49002,7 +48649,7 @@ "qname": "sklearn.covariance._elliptic_envelope.EllipticEnvelope.score.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,) or (n_samples, n_outputs)", "description": "True labels for X." @@ -49018,7 +48665,7 @@ "qname": "sklearn.covariance._elliptic_envelope.EllipticEnvelope.score.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,), default=None", "description": "Sample weights." @@ -49030,7 +48677,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Return the mean accuracy on the given test data and labels.\n\nIn multi-label classification, this is the subset accuracy\nwhich is a harsh metric since you require for each sample that\neach label set be correctly predicted.", "docstring": "Return the mean accuracy on the given test data and labels.\n\n In multi-label classification, this is the subset accuracy\n which is a harsh metric since you require for each sample that\n each label set be correctly predicted.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Test samples.\n\n y : array-like of shape (n_samples,) or (n_samples, n_outputs)\n True labels for X.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights.\n\n Returns\n -------\n score : float\n Mean accuracy of self.predict(X) w.r.t. y.\n " }, @@ -49046,7 +48693,7 @@ "qname": "sklearn.covariance._elliptic_envelope.EllipticEnvelope.score_samples.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -49059,7 +48706,7 @@ "qname": "sklearn.covariance._elliptic_envelope.EllipticEnvelope.score_samples.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "The data matrix." @@ -49071,7 +48718,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Compute the negative Mahalanobis distances.", "docstring": "Compute the negative Mahalanobis distances.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The data matrix.\n\n Returns\n -------\n negative_mahal_distances : array-like of shape (n_samples,)\n Opposite of the Mahalanobis distances.\n " }, @@ -49087,7 +48734,7 @@ "qname": "sklearn.covariance._empirical_covariance.EmpiricalCovariance.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -49100,7 +48747,7 @@ "qname": "sklearn.covariance._empirical_covariance.EmpiricalCovariance.__init__.store_precision", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Specifies if the estimated precision is stored." 
@@ -49116,7 +48763,7 @@ "qname": "sklearn.covariance._empirical_covariance.EmpiricalCovariance.__init__.assume_centered", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "If True, data are not centered before computation.\nUseful when working with data whose mean is almost, but not exactly\nzero.\nIf False (default), data are centered before computation." @@ -49128,7 +48775,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -49185,7 +48832,7 @@ "qname": "sklearn.covariance._empirical_covariance.EmpiricalCovariance.error_norm.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -49198,7 +48845,7 @@ "qname": "sklearn.covariance._empirical_covariance.EmpiricalCovariance.error_norm.comp_cov", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_features, n_features)", "description": "The covariance to compare with." @@ -49214,7 +48861,7 @@ "qname": "sklearn.covariance._empirical_covariance.EmpiricalCovariance.error_norm.norm", "default_value": "'frobenius'", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{\"frobenius\", \"spectral\"}, default=\"frobenius\"", "description": "The type of norm used to compute the error. Available error types:\n- 'frobenius' (default): sqrt(tr(A^t.A))\n- 'spectral': sqrt(max(eigenvalues(A^t.A))\nwhere A is the error ``(comp_cov - self.covariance_)``." @@ -49230,7 +48877,7 @@ "qname": "sklearn.covariance._empirical_covariance.EmpiricalCovariance.error_norm.scaling", "default_value": "True", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "If True (default), the squared error norm is divided by n_features.\nIf False, the squared error norm is not rescaled." @@ -49246,7 +48893,7 @@ "qname": "sklearn.covariance._empirical_covariance.EmpiricalCovariance.error_norm.squared", "default_value": "True", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether to compute the squared error norm or the error norm.\nIf True (default), the squared error norm is returned.\nIf False, the error norm is returned." @@ -49258,7 +48905,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Compute the Mean Squared Error between two covariance estimators.", "docstring": "Compute the Mean Squared Error between two covariance estimators.\n\n Parameters\n ----------\n comp_cov : array-like of shape (n_features, n_features)\n The covariance to compare with.\n\n norm : {\"frobenius\", \"spectral\"}, default=\"frobenius\"\n The type of norm used to compute the error. 
Available error types:\n - 'frobenius' (default): sqrt(tr(A^t.A))\n - 'spectral': sqrt(max(eigenvalues(A^t.A))\n where A is the error ``(comp_cov - self.covariance_)``.\n\n scaling : bool, default=True\n If True (default), the squared error norm is divided by n_features.\n If False, the squared error norm is not rescaled.\n\n squared : bool, default=True\n Whether to compute the squared error norm or the error norm.\n If True (default), the squared error norm is returned.\n If False, the error norm is returned.\n\n Returns\n -------\n result : float\n The Mean Squared Error (in the sense of the Frobenius norm) between\n `self` and `comp_cov` covariance estimators.\n " }, @@ -49274,7 +48921,7 @@ "qname": "sklearn.covariance._empirical_covariance.EmpiricalCovariance.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -49287,7 +48934,7 @@ "qname": "sklearn.covariance._empirical_covariance.EmpiricalCovariance.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Training data, where `n_samples` is the number of samples and\n`n_features` is the number of features." @@ -49303,7 +48950,7 @@ "qname": "sklearn.covariance._empirical_covariance.EmpiricalCovariance.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present for API consistency by convention." @@ -49315,9 +48962,9 @@ } ], "results": [], - "is_public": false, - "description": "Fit the maximum likelihood covariance estimator to X.", - "docstring": "Fit the maximum likelihood covariance estimator to X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n self : object\n Returns the instance itself.\n " + "is_public": true, + "description": "Fit the maximum liklihood covariance estimator to X.", + "docstring": "Fit the maximum liklihood covariance estimator to X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n self : object\n Returns the instance itself.\n " }, { "id": "sklearn/sklearn.covariance._empirical_covariance/EmpiricalCovariance/get_precision", @@ -49331,7 +48978,7 @@ "qname": "sklearn.covariance._empirical_covariance.EmpiricalCovariance.get_precision.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -49340,7 +48987,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Getter for the precision matrix.", "docstring": "Getter for the precision matrix.\n\n Returns\n -------\n precision_ : array-like of shape (n_features, n_features)\n The precision matrix associated to the current covariance object.\n " }, @@ -49356,7 +49003,7 @@ "qname": "sklearn.covariance._empirical_covariance.EmpiricalCovariance.mahalanobis.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": 
true, "docstring": { "type": "", "description": "" @@ -49369,7 +49016,7 @@ "qname": "sklearn.covariance._empirical_covariance.EmpiricalCovariance.mahalanobis.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "The observations, the Mahalanobis distances of the which we\ncompute. Observations are assumed to be drawn from the same\ndistribution than the data used in fit." @@ -49381,7 +49028,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Compute the squared Mahalanobis distances of given observations.", "docstring": "Compute the squared Mahalanobis distances of given observations.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The observations, the Mahalanobis distances of the which we\n compute. Observations are assumed to be drawn from the same\n distribution than the data used in fit.\n\n Returns\n -------\n dist : ndarray of shape (n_samples,)\n Squared Mahalanobis distances of the observations.\n " }, @@ -49397,7 +49044,7 @@ "qname": "sklearn.covariance._empirical_covariance.EmpiricalCovariance.score.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -49410,7 +49057,7 @@ "qname": "sklearn.covariance._empirical_covariance.EmpiricalCovariance.score.X_test", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Test data of which we compute the likelihood, where `n_samples` is\nthe number of samples and `n_features` is the number of features.\n`X_test` is assumed to be drawn from the same distribution than\nthe data used in fit (including centering)." @@ -49426,7 +49073,7 @@ "qname": "sklearn.covariance._empirical_covariance.EmpiricalCovariance.score.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present for API consistency by convention." 
@@ -49438,7 +49085,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Compute the log-likelihood of `X_test` under the estimated Gaussian model.\n\nThe Gaussian model is defined by its mean and covariance matrix which are\nrepresented respectively by `self.location_` and `self.covariance_`.", "docstring": "Compute the log-likelihood of `X_test` under the estimated Gaussian model.\n\n The Gaussian model is defined by its mean and covariance matrix which are\n represented respectively by `self.location_` and `self.covariance_`.\n\n Parameters\n ----------\n X_test : array-like of shape (n_samples, n_features)\n Test data of which we compute the likelihood, where `n_samples` is\n the number of samples and `n_features` is the number of features.\n `X_test` is assumed to be drawn from the same distribution than\n the data used in fit (including centering).\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n res : float\n The log-likelihood of `X_test` with `self.location_` and `self.covariance_`\n as estimators of the Gaussian model mean and covariance matrix respectively.\n " }, @@ -49542,7 +49189,7 @@ "qname": "sklearn.covariance._graph_lasso.GraphicalLasso.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -49555,7 +49202,7 @@ "qname": "sklearn.covariance._graph_lasso.GraphicalLasso.__init__.alpha", "default_value": "0.01", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.01", "description": "The regularization parameter: the higher alpha, the more\nregularization, the sparser the inverse covariance.\nRange is (0, inf]." @@ -49571,14 +49218,14 @@ "qname": "sklearn.covariance._graph_lasso.GraphicalLasso.__init__.mode", "default_value": "'cd'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'cd', 'lars'}, default='cd'", "description": "The Lasso solver to use: coordinate descent or LARS. Use LARS for\nvery sparse underlying graphs, where p > n. Elsewhere prefer cd\nwhich is more numerically stable." }, "type": { "kind": "EnumType", - "values": ["cd", "lars"] + "values": ["lars", "cd"] } }, { @@ -49587,7 +49234,7 @@ "qname": "sklearn.covariance._graph_lasso.GraphicalLasso.__init__.tol", "default_value": "0.0001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-4", "description": "The tolerance to declare convergence: if the dual gap goes below\nthis value, iterations are stopped. Range is (0, inf]." @@ -49603,7 +49250,7 @@ "qname": "sklearn.covariance._graph_lasso.GraphicalLasso.__init__.enet_tol", "default_value": "0.0001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-4", "description": "The tolerance for the elastic net solver used to calculate the descent\ndirection. This parameter controls the accuracy of the search direction\nfor a given column update, not of the overall parameter estimate. Only\nused for mode='cd'. Range is (0, inf]." @@ -49619,7 +49266,7 @@ "qname": "sklearn.covariance._graph_lasso.GraphicalLasso.__init__.max_iter", "default_value": "100", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=100", "description": "The maximum number of iterations." 
@@ -49635,7 +49282,7 @@ "qname": "sklearn.covariance._graph_lasso.GraphicalLasso.__init__.verbose", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "If verbose is True, the objective function and dual gap are\nplotted at each iteration." @@ -49651,7 +49298,7 @@ "qname": "sklearn.covariance._graph_lasso.GraphicalLasso.__init__.assume_centered", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "If True, data are not centered before computation.\nUseful when working with data whose mean is almost, but not exactly\nzero.\nIf False, data are centered before computation." @@ -49663,7 +49310,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -49679,7 +49326,7 @@ "qname": "sklearn.covariance._graph_lasso.GraphicalLasso.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -49692,7 +49339,7 @@ "qname": "sklearn.covariance._graph_lasso.GraphicalLasso.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Data from which to compute the covariance estimate." @@ -49708,7 +49355,7 @@ "qname": "sklearn.covariance._graph_lasso.GraphicalLasso.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present for API consistency by convention." @@ -49720,7 +49367,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the GraphicalLasso model to X.", "docstring": "Fit the GraphicalLasso model to X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Data from which to compute the covariance estimate.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n self : object\n Returns the instance itself.\n " }, @@ -49736,7 +49383,7 @@ "qname": "sklearn.covariance._graph_lasso.GraphicalLassoCV.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -49749,7 +49396,7 @@ "qname": "sklearn.covariance._graph_lasso.GraphicalLassoCV.__init__.alphas", "default_value": "4", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or array-like of shape (n_alphas,), dtype=float, default=4", "description": "If an integer is given, it fixes the number of points on the\ngrids of alpha to be used. If a list is given, it gives the\ngrid to be used. See the notes in the class docstring for\nmore details. Range is (0, inf] when floats given." @@ -49778,7 +49425,7 @@ "qname": "sklearn.covariance._graph_lasso.GraphicalLassoCV.__init__.n_refinements", "default_value": "4", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=4", "description": "The number of times the grid is refined. Not used if explicit\nvalues of alphas are passed. Range is [1, inf)." 
@@ -49794,7 +49441,7 @@ "qname": "sklearn.covariance._graph_lasso.GraphicalLassoCV.__init__.cv", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, cross-validation generator or iterable, default=None", "description": "Determines the cross-validation splitting strategy.\nPossible inputs for cv are:\n\n- None, to use the default 5-fold cross-validation,\n- integer, to specify the number of folds.\n- :term:`CV splitter`,\n- An iterable yielding (train, test) splits as arrays of indices.\n\nFor integer/None inputs :class:`KFold` is used.\n\nRefer :ref:`User Guide ` for the various\ncross-validation strategies that can be used here.\n\n.. versionchanged:: 0.20\n ``cv`` default value if None changed from 3-fold to 5-fold." @@ -49823,7 +49470,7 @@ "qname": "sklearn.covariance._graph_lasso.GraphicalLassoCV.__init__.tol", "default_value": "0.0001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-4", "description": "The tolerance to declare convergence: if the dual gap goes below\nthis value, iterations are stopped. Range is (0, inf]." @@ -49839,7 +49486,7 @@ "qname": "sklearn.covariance._graph_lasso.GraphicalLassoCV.__init__.enet_tol", "default_value": "0.0001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-4", "description": "The tolerance for the elastic net solver used to calculate the descent\ndirection. This parameter controls the accuracy of the search direction\nfor a given column update, not of the overall parameter estimate. Only\nused for mode='cd'. Range is (0, inf]." @@ -49855,7 +49502,7 @@ "qname": "sklearn.covariance._graph_lasso.GraphicalLassoCV.__init__.max_iter", "default_value": "100", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=100", "description": "Maximum number of iterations." @@ -49871,14 +49518,14 @@ "qname": "sklearn.covariance._graph_lasso.GraphicalLassoCV.__init__.mode", "default_value": "'cd'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'cd', 'lars'}, default='cd'", "description": "The Lasso solver to use: coordinate descent or LARS. Use LARS for\nvery sparse underlying graphs, where number of features is greater\nthan number of samples. Elsewhere prefer cd which is more numerically\nstable." }, "type": { "kind": "EnumType", - "values": ["cd", "lars"] + "values": ["lars", "cd"] } }, { @@ -49887,7 +49534,7 @@ "qname": "sklearn.covariance._graph_lasso.GraphicalLassoCV.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Number of jobs to run in parallel.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details.\n\n.. versionchanged:: v0.20\n `n_jobs` default changed from 1 to None" @@ -49903,7 +49550,7 @@ "qname": "sklearn.covariance._graph_lasso.GraphicalLassoCV.__init__.verbose", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "If verbose is True, the objective function and duality gap are\nprinted at each iteration." 
@@ -49919,7 +49566,7 @@ "qname": "sklearn.covariance._graph_lasso.GraphicalLassoCV.__init__.assume_centered", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "If True, data are not centered before computation.\nUseful when working with data whose mean is almost, but not exactly\nzero.\nIf False, data are centered before computation." @@ -49931,7 +49578,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -49947,7 +49594,7 @@ "qname": "sklearn.covariance._graph_lasso.GraphicalLassoCV.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -49960,7 +49607,7 @@ "qname": "sklearn.covariance._graph_lasso.GraphicalLassoCV.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Data from which to compute the covariance estimate." @@ -49976,7 +49623,7 @@ "qname": "sklearn.covariance._graph_lasso.GraphicalLassoCV.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present for API consistency by convention." @@ -49988,7 +49635,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the GraphicalLasso covariance model to X.", "docstring": "Fit the GraphicalLasso covariance model to X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Data from which to compute the covariance estimate.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n self : object\n Returns the instance itself.\n " }, @@ -50316,7 +49963,7 @@ }, "type": { "kind": "EnumType", - "values": ["cd", "lars"] + "values": ["lars", "cd"] } }, { @@ -50520,7 +50167,7 @@ }, "type": { "kind": "EnumType", - "values": ["cd", "lars"] + "values": ["lars", "cd"] } }, { @@ -50614,7 +50261,7 @@ "qname": "sklearn.covariance._robust_covariance.MinCovDet.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -50627,7 +50274,7 @@ "qname": "sklearn.covariance._robust_covariance.MinCovDet.__init__.store_precision", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Specify if the estimated precision is stored." @@ -50643,7 +50290,7 @@ "qname": "sklearn.covariance._robust_covariance.MinCovDet.__init__.assume_centered", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "If True, the support of the robust location and the covariance\nestimates is computed, and a covariance estimate is recomputed from\nit, without centering the data.\nUseful to work with data whose mean is significantly equal to\nzero but is not exactly zero.\nIf False, the robust location and covariance are directly computed\nwith the FastMCD algorithm without additional treatment." 
@@ -50659,7 +50306,7 @@ "qname": "sklearn.covariance._robust_covariance.MinCovDet.__init__.support_fraction", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=None", "description": "The proportion of points to be included in the support of the raw\nMCD estimate. Default is None, which implies that the minimum\nvalue of support_fraction will be used within the algorithm:\n`(n_sample + n_features + 1) / 2`. The parameter must be in the range\n(0, 1)." @@ -50675,7 +50322,7 @@ "qname": "sklearn.covariance._robust_covariance.MinCovDet.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Determines the pseudo random number generator for shuffling the data.\nPass an int for reproducible results across multiple function calls.\nSee :term:`Glossary `." @@ -50700,7 +50347,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -50716,7 +50363,7 @@ "qname": "sklearn.covariance._robust_covariance.MinCovDet.correct_covariance.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -50729,7 +50376,7 @@ "qname": "sklearn.covariance._robust_covariance.MinCovDet.correct_covariance.data", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "The data matrix, with p features and n samples.\nThe data set must be the one which was used to compute\nthe raw estimates." @@ -50741,7 +50388,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Apply a correction to raw Minimum Covariance Determinant estimates.\n\nCorrection using the empirical correction factor suggested\nby Rousseeuw and Van Driessen in [RVD]_.", "docstring": "Apply a correction to raw Minimum Covariance Determinant estimates.\n\n Correction using the empirical correction factor suggested\n by Rousseeuw and Van Driessen in [RVD]_.\n\n Parameters\n ----------\n data : array-like of shape (n_samples, n_features)\n The data matrix, with p features and n samples.\n The data set must be the one which was used to compute\n the raw estimates.\n\n Returns\n -------\n covariance_corrected : ndarray of shape (n_features, n_features)\n Corrected robust covariance estimate.\n\n References\n ----------\n\n .. [RVD] A Fast Algorithm for the Minimum Covariance\n Determinant Estimator, 1999, American Statistical Association\n and the American Society for Quality, TECHNOMETRICS\n " }, @@ -50757,7 +50404,7 @@ "qname": "sklearn.covariance._robust_covariance.MinCovDet.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -50770,7 +50417,7 @@ "qname": "sklearn.covariance._robust_covariance.MinCovDet.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Training data, where `n_samples` is the number of samples\nand `n_features` is the number of features." 
@@ -50786,7 +50433,7 @@ "qname": "sklearn.covariance._robust_covariance.MinCovDet.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present for API consistency by convention." @@ -50798,7 +50445,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit a Minimum Covariance Determinant with the FastMCD algorithm.", "docstring": "Fit a Minimum Covariance Determinant with the FastMCD algorithm.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n self : object\n Returns the instance itself.\n " }, @@ -50814,7 +50461,7 @@ "qname": "sklearn.covariance._robust_covariance.MinCovDet.reweight_covariance.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -50827,7 +50474,7 @@ "qname": "sklearn.covariance._robust_covariance.MinCovDet.reweight_covariance.data", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "The data matrix, with p features and n samples.\nThe data set must be the one which was used to compute\nthe raw estimates." @@ -50839,7 +50486,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Re-weight raw Minimum Covariance Determinant estimates.\n\nRe-weight observations using Rousseeuw's method (equivalent to\ndeleting outlying observations from the data set before\ncomputing location and covariance estimates) described\nin [RVDriessen]_.", "docstring": "Re-weight raw Minimum Covariance Determinant estimates.\n\n Re-weight observations using Rousseeuw's method (equivalent to\n deleting outlying observations from the data set before\n computing location and covariance estimates) described\n in [RVDriessen]_.\n\n Parameters\n ----------\n data : array-like of shape (n_samples, n_features)\n The data matrix, with p features and n samples.\n The data set must be the one which was used to compute\n the raw estimates.\n\n Returns\n -------\n location_reweighted : ndarray of shape (n_features,)\n Re-weighted robust location estimate.\n\n covariance_reweighted : ndarray of shape (n_features, n_features)\n Re-weighted robust covariance estimate.\n\n support_reweighted : ndarray of shape (n_samples,), dtype=bool\n A mask of the observations that have been used to compute\n the re-weighted robust location and covariance estimates.\n\n References\n ----------\n\n .. 
[RVDriessen] A Fast Algorithm for the Minimum Covariance\n Determinant Estimator, 1999, American Statistical Association\n and the American Society for Quality, TECHNOMETRICS\n " }, @@ -51346,7 +50993,7 @@ "qname": "sklearn.covariance._shrunk_covariance.LedoitWolf.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -51359,7 +51006,7 @@ "qname": "sklearn.covariance._shrunk_covariance.LedoitWolf.__init__.store_precision", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Specify if the estimated precision is stored." @@ -51375,7 +51022,7 @@ "qname": "sklearn.covariance._shrunk_covariance.LedoitWolf.__init__.assume_centered", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "If True, data will not be centered before computation.\nUseful when working with data whose mean is almost, but not exactly\nzero.\nIf False (default), data will be centered before computation." @@ -51391,7 +51038,7 @@ "qname": "sklearn.covariance._shrunk_covariance.LedoitWolf.__init__.block_size", "default_value": "1000", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=1000", "description": "Size of blocks into which the covariance matrix will be split\nduring its Ledoit-Wolf estimation. This is purely a memory\noptimization and does not affect results." @@ -51403,7 +51050,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -51419,7 +51066,7 @@ "qname": "sklearn.covariance._shrunk_covariance.LedoitWolf.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -51432,7 +51079,7 @@ "qname": "sklearn.covariance._shrunk_covariance.LedoitWolf.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Training data, where `n_samples` is the number of samples\nand `n_features` is the number of features." @@ -51448,7 +51095,7 @@ "qname": "sklearn.covariance._shrunk_covariance.LedoitWolf.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present for API consistency by convention." 
@@ -51460,7 +51107,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the Ledoit-Wolf shrunk covariance model to X.", "docstring": "Fit the Ledoit-Wolf shrunk covariance model to X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n self : object\n Returns the instance itself.\n " }, @@ -51476,7 +51123,7 @@ "qname": "sklearn.covariance._shrunk_covariance.OAS.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -51489,7 +51136,7 @@ "qname": "sklearn.covariance._shrunk_covariance.OAS.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Training data, where `n_samples` is the number of samples\nand `n_features` is the number of features." @@ -51505,7 +51152,7 @@ "qname": "sklearn.covariance._shrunk_covariance.OAS.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present for API consistency by convention." @@ -51517,7 +51164,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the Oracle Approximating Shrinkage covariance model to X.", "docstring": "Fit the Oracle Approximating Shrinkage covariance model to X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n self : object\n Returns the instance itself.\n " }, @@ -51533,7 +51180,7 @@ "qname": "sklearn.covariance._shrunk_covariance.ShrunkCovariance.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -51546,7 +51193,7 @@ "qname": "sklearn.covariance._shrunk_covariance.ShrunkCovariance.__init__.store_precision", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Specify if the estimated precision is stored." @@ -51562,7 +51209,7 @@ "qname": "sklearn.covariance._shrunk_covariance.ShrunkCovariance.__init__.assume_centered", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "If True, data will not be centered before computation.\nUseful when working with data whose mean is almost, but not exactly\nzero.\nIf False, data will be centered before computation." @@ -51578,7 +51225,7 @@ "qname": "sklearn.covariance._shrunk_covariance.ShrunkCovariance.__init__.shrinkage", "default_value": "0.1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.1", "description": "Coefficient in the convex combination used for the computation\nof the shrunk estimate. Range is [0, 1]." 
@@ -51590,7 +51237,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -51606,7 +51253,7 @@ "qname": "sklearn.covariance._shrunk_covariance.ShrunkCovariance.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -51619,7 +51266,7 @@ "qname": "sklearn.covariance._shrunk_covariance.ShrunkCovariance.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Training data, where `n_samples` is the number of samples\nand `n_features` is the number of features." @@ -51635,7 +51282,7 @@ "qname": "sklearn.covariance._shrunk_covariance.ShrunkCovariance.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present for API consistency by convention." @@ -51647,7 +51294,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the shrunk covariance model to X.", "docstring": "Fit the shrunk covariance model to X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n self : object\n Returns the instance itself.\n " }, @@ -51871,7 +51518,7 @@ "qname": "sklearn.cross_decomposition._pls.CCA.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -51884,7 +51531,7 @@ "qname": "sklearn.cross_decomposition._pls.CCA.__init__.n_components", "default_value": "2", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=2", "description": "Number of components to keep. Should be in `[1, min(n_samples,\nn_features, n_targets)]`." @@ -51900,7 +51547,7 @@ "qname": "sklearn.cross_decomposition._pls.CCA.__init__.scale", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether to scale `X` and `Y`." @@ -51916,7 +51563,7 @@ "qname": "sklearn.cross_decomposition._pls.CCA.__init__.max_iter", "default_value": "500", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=500", "description": "The maximum number of iterations of the power method." @@ -51932,7 +51579,7 @@ "qname": "sklearn.cross_decomposition._pls.CCA.__init__.tol", "default_value": "1e-06", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-06", "description": "The tolerance used as convergence criteria in the power method: the\nalgorithm stops whenever the squared norm of `u_i - u_{i-1}` is less\nthan `tol`, where `u` corresponds to the left singular vector." @@ -51948,7 +51595,7 @@ "qname": "sklearn.cross_decomposition._pls.CCA.__init__.copy", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether to copy `X` and `Y` in fit before applying centering, and\npotentially scaling. 
If False, these operations will be done inplace,\nmodifying both arrays." @@ -51960,7 +51607,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -51976,7 +51623,7 @@ "qname": "sklearn.cross_decomposition._pls.PLSCanonical.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -51989,7 +51636,7 @@ "qname": "sklearn.cross_decomposition._pls.PLSCanonical.__init__.n_components", "default_value": "2", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=2", "description": "Number of components to keep. Should be in `[1, min(n_samples,\nn_features, n_targets)]`." @@ -52005,7 +51652,7 @@ "qname": "sklearn.cross_decomposition._pls.PLSCanonical.__init__.scale", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether to scale `X` and `Y`." @@ -52021,7 +51668,7 @@ "qname": "sklearn.cross_decomposition._pls.PLSCanonical.__init__.algorithm", "default_value": "'nipals'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'nipals', 'svd'}, default='nipals'", "description": "The algorithm used to estimate the first singular vectors of the\ncross-covariance matrix. 'nipals' uses the power method while 'svd'\nwill compute the whole SVD." @@ -52037,7 +51684,7 @@ "qname": "sklearn.cross_decomposition._pls.PLSCanonical.__init__.max_iter", "default_value": "500", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=500", "description": "The maximum number of iterations of the power method when\n`algorithm='nipals'`. Ignored otherwise." @@ -52053,7 +51700,7 @@ "qname": "sklearn.cross_decomposition._pls.PLSCanonical.__init__.tol", "default_value": "1e-06", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-06", "description": "The tolerance used as convergence criteria in the power method: the\nalgorithm stops whenever the squared norm of `u_i - u_{i-1}` is less\nthan `tol`, where `u` corresponds to the left singular vector." @@ -52069,7 +51716,7 @@ "qname": "sklearn.cross_decomposition._pls.PLSCanonical.__init__.copy", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether to copy `X` and `Y` in fit before applying centering, and\npotentially scaling. If False, these operations will be done inplace,\nmodifying both arrays." @@ -52081,7 +51728,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -52097,7 +51744,7 @@ "qname": "sklearn.cross_decomposition._pls.PLSRegression.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -52110,7 +51757,7 @@ "qname": "sklearn.cross_decomposition._pls.PLSRegression.__init__.n_components", "default_value": "2", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=2", "description": "Number of components to keep. Should be in `[1, min(n_samples,\nn_features, n_targets)]`." 
@@ -52126,7 +51773,7 @@ "qname": "sklearn.cross_decomposition._pls.PLSRegression.__init__.scale", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether to scale `X` and `Y`." @@ -52142,7 +51789,7 @@ "qname": "sklearn.cross_decomposition._pls.PLSRegression.__init__.max_iter", "default_value": "500", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=500", "description": "The maximum number of iterations of the power method when\n`algorithm='nipals'`. Ignored otherwise." @@ -52158,7 +51805,7 @@ "qname": "sklearn.cross_decomposition._pls.PLSRegression.__init__.tol", "default_value": "1e-06", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-06", "description": "The tolerance used as convergence criteria in the power method: the\nalgorithm stops whenever the squared norm of `u_i - u_{i-1}` is less\nthan `tol`, where `u` corresponds to the left singular vector." @@ -52174,7 +51821,7 @@ "qname": "sklearn.cross_decomposition._pls.PLSRegression.__init__.copy", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether to copy `X` and `Y` in :term:`fit` before applying centering,\nand potentially scaling. If `False`, these operations will be done\ninplace, modifying both arrays." @@ -52186,7 +51833,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -52202,7 +51849,7 @@ "qname": "sklearn.cross_decomposition._pls.PLSRegression.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -52215,7 +51862,7 @@ "qname": "sklearn.cross_decomposition._pls.PLSRegression.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Training vectors, where `n_samples` is the number of samples and\n`n_features` is the number of predictors." @@ -52231,7 +51878,7 @@ "qname": "sklearn.cross_decomposition._pls.PLSRegression.fit.Y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,) or (n_samples, n_targets)", "description": "Target vectors, where `n_samples` is the number of samples and\n`n_targets` is the number of response variables." 
@@ -52243,7 +51890,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit model to data.", "docstring": "Fit model to data.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training vectors, where `n_samples` is the number of samples and\n `n_features` is the number of predictors.\n\n Y : array-like of shape (n_samples,) or (n_samples, n_targets)\n Target vectors, where `n_samples` is the number of samples and\n `n_targets` is the number of response variables.\n\n Returns\n -------\n self : object\n Fitted model.\n " }, @@ -52259,7 +51906,7 @@ "qname": "sklearn.cross_decomposition._pls.PLSSVD.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -52272,7 +51919,7 @@ "qname": "sklearn.cross_decomposition._pls.PLSSVD.__init__.n_components", "default_value": "2", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=2", "description": "The number of components to keep. Should be in `[1,\nmin(n_samples, n_features, n_targets)]`." @@ -52288,7 +51935,7 @@ "qname": "sklearn.cross_decomposition._pls.PLSSVD.__init__.scale", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether to scale `X` and `Y`." @@ -52304,7 +51951,7 @@ "qname": "sklearn.cross_decomposition._pls.PLSSVD.__init__.copy", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether to copy `X` and `Y` in fit before applying centering, and\npotentially scaling. If `False`, these operations will be done inplace,\nmodifying both arrays." @@ -52316,7 +51963,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -52332,7 +51979,7 @@ "qname": "sklearn.cross_decomposition._pls.PLSSVD.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -52345,7 +51992,7 @@ "qname": "sklearn.cross_decomposition._pls.PLSSVD.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Training samples." @@ -52361,7 +52008,7 @@ "qname": "sklearn.cross_decomposition._pls.PLSSVD.fit.Y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,) or (n_samples, n_targets)", "description": "Targets." 
@@ -52373,7 +52020,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit model to data.", "docstring": "Fit model to data.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training samples.\n\n Y : array-like of shape (n_samples,) or (n_samples, n_targets)\n Targets.\n\n Returns\n -------\n self : object\n Fitted estimator.\n " }, @@ -52389,7 +52036,7 @@ "qname": "sklearn.cross_decomposition._pls.PLSSVD.fit_transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -52402,7 +52049,7 @@ "qname": "sklearn.cross_decomposition._pls.PLSSVD.fit_transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Training samples." @@ -52418,7 +52065,7 @@ "qname": "sklearn.cross_decomposition._pls.PLSSVD.fit_transform.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,) or (n_samples, n_targets), default=None", "description": "Targets." @@ -52430,7 +52077,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Learn and apply the dimensionality reduction.", "docstring": "Learn and apply the dimensionality reduction.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training samples.\n\n y : array-like of shape (n_samples,) or (n_samples, n_targets), default=None\n Targets.\n\n Returns\n -------\n out : array-like or tuple of array-like\n The transformed data `X_tranformed` if `Y is not None`,\n `(X_transformed, Y_transformed)` otherwise.\n " }, @@ -52446,7 +52093,7 @@ "qname": "sklearn.cross_decomposition._pls.PLSSVD.transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -52459,7 +52106,7 @@ "qname": "sklearn.cross_decomposition._pls.PLSSVD.transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Samples to be transformed." @@ -52475,7 +52122,7 @@ "qname": "sklearn.cross_decomposition._pls.PLSSVD.transform.Y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,) or (n_samples, n_targets), default=None", "description": "Targets." 
@@ -52487,7 +52134,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Apply the dimensionality reduction.", "docstring": "\n Apply the dimensionality reduction.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Samples to be transformed.\n\n Y : array-like of shape (n_samples,) or (n_samples, n_targets), default=None\n Targets.\n\n Returns\n -------\n x_scores : array-like or tuple of array-like\n The transformed data `X_tranformed` if `Y is not None`,\n `(X_transformed, Y_transformed)` otherwise.\n " }, @@ -53214,90 +52861,149 @@ "docstring": "Same as svd_flip but works on 1d arrays, and is inplace" }, { - "id": "sklearn/sklearn.datasets._arff_parser/_liac_arff_parser", - "name": "_liac_arff_parser", - "qname": "sklearn.datasets._arff_parser._liac_arff_parser", + "id": "sklearn/sklearn.datasets._arff_parser/_convert_arff_data", + "name": "_convert_arff_data", + "qname": "sklearn.datasets._arff_parser._convert_arff_data", "decorators": [], "parameters": [ { - "id": "sklearn/sklearn.datasets._arff_parser/_liac_arff_parser/gzip_file", - "name": "gzip_file", - "qname": "sklearn.datasets._arff_parser._liac_arff_parser.gzip_file", + "id": "sklearn/sklearn.datasets._arff_parser/_convert_arff_data/arff", + "name": "arff", + "qname": "sklearn.datasets._arff_parser._convert_arff_data.arff", "default_value": null, "assigned_by": "POSITION_OR_NAME", "is_public": false, "docstring": { - "type": "GzipFile instance", - "description": "The file compressed to be read." + "type": "dict", + "description": "As obtained from liac-arff object." }, "type": { "kind": "NamedType", - "name": "GzipFile instance" + "name": "dict" } }, { - "id": "sklearn/sklearn.datasets._arff_parser/_liac_arff_parser/output_arrays_type", - "name": "output_arrays_type", - "qname": "sklearn.datasets._arff_parser._liac_arff_parser.output_arrays_type", + "id": "sklearn/sklearn.datasets._arff_parser/_convert_arff_data/col_slice_x", + "name": "col_slice_x", + "qname": "sklearn.datasets._arff_parser._convert_arff_data.col_slice_x", "default_value": null, "assigned_by": "POSITION_OR_NAME", "is_public": false, "docstring": { - "type": "", - "description": "" + "type": "list", + "description": "The column indices that are sliced from the original array to return\nas X data" }, - "type": {} + "type": { + "kind": "NamedType", + "name": "list" + } }, { - "id": "sklearn/sklearn.datasets._arff_parser/_liac_arff_parser/openml_columns_info", - "name": "openml_columns_info", - "qname": "sklearn.datasets._arff_parser._liac_arff_parser.openml_columns_info", + "id": "sklearn/sklearn.datasets._arff_parser/_convert_arff_data/col_slice_y", + "name": "col_slice_y", + "qname": "sklearn.datasets._arff_parser._convert_arff_data.col_slice_y", "default_value": null, "assigned_by": "POSITION_OR_NAME", "is_public": false, + "docstring": { + "type": "list", + "description": "The column indices that are sliced from the original array to return\nas y data" + }, + "type": { + "kind": "NamedType", + "name": "list" + } + }, + { + "id": "sklearn/sklearn.datasets._arff_parser/_convert_arff_data/shape", + "name": "shape", + "qname": "sklearn.datasets._arff_parser._convert_arff_data.shape", + "default_value": "None", + "assigned_by": "POSITION_OR_NAME", + "is_public": false, "docstring": { "type": "", "description": "" }, "type": {} - }, + } + ], + "results": [], + "is_public": false, + "description": "converts the arff object into the appropriate matrix type (np.array or\nscipy.sparse.csr_matrix) based on the 
'data part' (i.e., in the\nliac-arff dict, the object from the 'data' key)", + "docstring": "\n converts the arff object into the appropriate matrix type (np.array or\n scipy.sparse.csr_matrix) based on the 'data part' (i.e., in the\n liac-arff dict, the object from the 'data' key)\n\n Parameters\n ----------\n arff : dict\n As obtained from liac-arff object.\n\n col_slice_x : list\n The column indices that are sliced from the original array to return\n as X data\n\n col_slice_y : list\n The column indices that are sliced from the original array to return\n as y data\n\n Returns\n -------\n X : np.array or scipy.sparse.csr_matrix\n y : np.array\n " + }, + { + "id": "sklearn/sklearn.datasets._arff_parser/_convert_arff_data_dataframe", + "name": "_convert_arff_data_dataframe", + "qname": "sklearn.datasets._arff_parser._convert_arff_data_dataframe", + "decorators": [], + "parameters": [ { - "id": "sklearn/sklearn.datasets._arff_parser/_liac_arff_parser/feature_names_to_select", - "name": "feature_names_to_select", - "qname": "sklearn.datasets._arff_parser._liac_arff_parser.feature_names_to_select", + "id": "sklearn/sklearn.datasets._arff_parser/_convert_arff_data_dataframe/arff", + "name": "arff", + "qname": "sklearn.datasets._arff_parser._convert_arff_data_dataframe.arff", "default_value": null, "assigned_by": "POSITION_OR_NAME", "is_public": false, "docstring": { - "type": "list of str", - "description": "A list of the feature names to be selected." + "type": "dict", + "description": "As obtained from liac-arff object." }, "type": { "kind": "NamedType", - "name": "list of str" + "name": "dict" } }, { - "id": "sklearn/sklearn.datasets._arff_parser/_liac_arff_parser/target_names_to_select", - "name": "target_names_to_select", - "qname": "sklearn.datasets._arff_parser._liac_arff_parser.target_names_to_select", + "id": "sklearn/sklearn.datasets._arff_parser/_convert_arff_data_dataframe/columns", + "name": "columns", + "qname": "sklearn.datasets._arff_parser._convert_arff_data_dataframe.columns", "default_value": null, "assigned_by": "POSITION_OR_NAME", "is_public": false, "docstring": { - "type": "list of str", - "description": "A list of the target names to be selected." + "type": "list", + "description": "Columns from dataframe to return." }, "type": { "kind": "NamedType", - "name": "list of str" + "name": "list" } }, { - "id": "sklearn/sklearn.datasets._arff_parser/_liac_arff_parser/shape", - "name": "shape", - "qname": "sklearn.datasets._arff_parser._liac_arff_parser.shape", - "default_value": "None", + "id": "sklearn/sklearn.datasets._arff_parser/_convert_arff_data_dataframe/features_dict", + "name": "features_dict", + "qname": "sklearn.datasets._arff_parser._convert_arff_data_dataframe.features_dict", + "default_value": null, + "assigned_by": "POSITION_OR_NAME", + "is_public": false, + "docstring": { + "type": "dict", + "description": "Maps feature name to feature info from openml." 
+ }, + "type": { + "kind": "NamedType", + "name": "dict" + } + } + ], + "results": [], + "is_public": false, + "description": "Convert the ARFF object into a pandas DataFrame.", + "docstring": "Convert the ARFF object into a pandas DataFrame.\n\n Parameters\n ----------\n arff : dict\n As obtained from liac-arff object.\n\n columns : list\n Columns from dataframe to return.\n\n features_dict : dict\n Maps feature name to feature info from openml.\n\n Returns\n -------\n result : tuple\n tuple with the resulting dataframe\n " + }, + { + "id": "sklearn/sklearn.datasets._arff_parser/_feature_to_dtype", + "name": "_feature_to_dtype", + "qname": "sklearn.datasets._arff_parser._feature_to_dtype", + "decorators": [], + "parameters": [ + { + "id": "sklearn/sklearn.datasets._arff_parser/_feature_to_dtype/feature", + "name": "feature", + "qname": "sklearn.datasets._arff_parser._feature_to_dtype.feature", + "default_value": null, "assigned_by": "POSITION_OR_NAME", "is_public": false, "docstring": { @@ -53309,160 +53015,124 @@ ], "results": [], "is_public": false, - "description": "ARFF parser using the LIAC-ARFF library coded purely in Python.\n\nThis parser is quite slow but consumes a generator. Currently it is needed\nto parse sparse datasets. For dense datasets, it is recommended to instead\nuse the pandas-based parser, although it does not always handles the\ndtypes exactly the same.", - "docstring": "ARFF parser using the LIAC-ARFF library coded purely in Python.\n\n This parser is quite slow but consumes a generator. Currently it is needed\n to parse sparse datasets. For dense datasets, it is recommended to instead\n use the pandas-based parser, although it does not always handles the\n dtypes exactly the same.\n\n Parameters\n ----------\n gzip_file : GzipFile instance\n The file compressed to be read.\n\n output_array_type : {\"numpy\", \"sparse\", \"pandas\"}\n The type of the arrays that will be returned. The possibilities ara:\n\n - `\"numpy\"`: both `X` and `y` will be NumPy arrays;\n - `\"sparse\"`: `X` will be sparse matrix and `y` will be a NumPy array;\n - `\"pandas\"`: `X` will be a pandas DataFrame and `y` will be either a\n pandas Series or DataFrame.\n\n columns_info : dict\n The information provided by OpenML regarding the columns of the ARFF\n file.\n\n feature_names_to_select : list of str\n A list of the feature names to be selected.\n\n target_names_to_select : list of str\n A list of the target names to be selected.\n\n Returns\n -------\n X : {ndarray, sparse matrix, dataframe}\n The data matrix.\n\n y : {ndarray, dataframe, series}\n The target.\n\n frame : dataframe or None\n A dataframe containing both `X` and `y`. `None` if\n `output_array_type != \"pandas\"`.\n\n categories : list of str or None\n The names of the features that are categorical. 
`None` if\n `output_array_type == \"pandas\"`.\n " + "description": "Map feature to dtype for pandas DataFrame", + "docstring": "Map feature to dtype for pandas DataFrame" }, { - "id": "sklearn/sklearn.datasets._arff_parser/_pandas_arff_parser", - "name": "_pandas_arff_parser", - "qname": "sklearn.datasets._arff_parser._pandas_arff_parser", + "id": "sklearn/sklearn.datasets._arff_parser/_liac_arff_parser", + "name": "_liac_arff_parser", + "qname": "sklearn.datasets._arff_parser._liac_arff_parser", "decorators": [], "parameters": [ { - "id": "sklearn/sklearn.datasets._arff_parser/_pandas_arff_parser/gzip_file", - "name": "gzip_file", - "qname": "sklearn.datasets._arff_parser._pandas_arff_parser.gzip_file", + "id": "sklearn/sklearn.datasets._arff_parser/_liac_arff_parser/arff_container", + "name": "arff_container", + "qname": "sklearn.datasets._arff_parser._liac_arff_parser.arff_container", "default_value": null, "assigned_by": "POSITION_OR_NAME", "is_public": false, "docstring": { - "type": "GzipFile instance", - "description": "The GZip compressed file with the ARFF formatted payload." + "type": "", + "description": "" }, - "type": { - "kind": "NamedType", - "name": "GzipFile instance" - } + "type": {} }, { - "id": "sklearn/sklearn.datasets._arff_parser/_pandas_arff_parser/output_type", - "name": "output_type", - "qname": "sklearn.datasets._arff_parser._pandas_arff_parser.output_type", + "id": "sklearn/sklearn.datasets._arff_parser/_liac_arff_parser/output_arrays_type", + "name": "output_arrays_type", + "qname": "sklearn.datasets._arff_parser._liac_arff_parser.output_arrays_type", "default_value": null, "assigned_by": "POSITION_OR_NAME", "is_public": false, "docstring": { - "type": "{\"numpy\", \"sparse\", \"pandas\"}", - "description": "The type of the arrays that will be returned. The possibilities are:\n\n- `\"numpy\"`: both `X` and `y` will be NumPy arrays;\n- `\"sparse\"`: `X` will be sparse matrix and `y` will be a NumPy array;\n- `\"pandas\"`: `X` will be a pandas DataFrame and `y` will be either a\n pandas Series or DataFrame." + "type": "", + "description": "" }, - "type": { - "kind": "EnumType", - "values": ["pandas", "sparse", "numpy"] - } + "type": {} }, { - "id": "sklearn/sklearn.datasets._arff_parser/_pandas_arff_parser/openml_columns_info", - "name": "openml_columns_info", - "qname": "sklearn.datasets._arff_parser._pandas_arff_parser.openml_columns_info", + "id": "sklearn/sklearn.datasets._arff_parser/_liac_arff_parser/features_dict", + "name": "features_dict", + "qname": "sklearn.datasets._arff_parser._liac_arff_parser.features_dict", "default_value": null, "assigned_by": "POSITION_OR_NAME", "is_public": false, "docstring": { - "type": "dict", - "description": "The information provided by OpenML regarding the columns of the ARFF\nfile." + "type": "", + "description": "" }, - "type": { - "kind": "NamedType", - "name": "dict" - } + "type": {} }, { - "id": "sklearn/sklearn.datasets._arff_parser/_pandas_arff_parser/feature_names_to_select", - "name": "feature_names_to_select", - "qname": "sklearn.datasets._arff_parser._pandas_arff_parser.feature_names_to_select", + "id": "sklearn/sklearn.datasets._arff_parser/_liac_arff_parser/data_columns", + "name": "data_columns", + "qname": "sklearn.datasets._arff_parser._liac_arff_parser.data_columns", "default_value": null, "assigned_by": "POSITION_OR_NAME", "is_public": false, "docstring": { - "type": "list of str", - "description": "A list of the feature names to be selected to build `X`." 
+ "type": "", + "description": "" }, - "type": { - "kind": "NamedType", - "name": "list of str" - } + "type": {} }, { - "id": "sklearn/sklearn.datasets._arff_parser/_pandas_arff_parser/target_names_to_select", - "name": "target_names_to_select", - "qname": "sklearn.datasets._arff_parser._pandas_arff_parser.target_names_to_select", + "id": "sklearn/sklearn.datasets._arff_parser/_liac_arff_parser/target_columns", + "name": "target_columns", + "qname": "sklearn.datasets._arff_parser._liac_arff_parser.target_columns", "default_value": null, "assigned_by": "POSITION_OR_NAME", "is_public": false, "docstring": { - "type": "list of str", - "description": "A list of the target names to be selected to build `y`." + "type": "", + "description": "" }, - "type": { - "kind": "NamedType", - "name": "list of str" - } - } - ], - "results": [], - "is_public": false, - "description": "ARFF parser using `pandas.read_csv`.\n\nThis parser uses the metadata fetched directly from OpenML and skips the metadata\nheaders of ARFF file itself. The data is loaded as a CSV file.", - "docstring": "ARFF parser using `pandas.read_csv`.\n\n This parser uses the metadata fetched directly from OpenML and skips the metadata\n headers of ARFF file itself. The data is loaded as a CSV file.\n\n Parameters\n ----------\n gzip_file : GzipFile instance\n The GZip compressed file with the ARFF formatted payload.\n\n output_type : {\"numpy\", \"sparse\", \"pandas\"}\n The type of the arrays that will be returned. The possibilities are:\n\n - `\"numpy\"`: both `X` and `y` will be NumPy arrays;\n - `\"sparse\"`: `X` will be sparse matrix and `y` will be a NumPy array;\n - `\"pandas\"`: `X` will be a pandas DataFrame and `y` will be either a\n pandas Series or DataFrame.\n\n openml_columns_info : dict\n The information provided by OpenML regarding the columns of the ARFF\n file.\n\n feature_names_to_select : list of str\n A list of the feature names to be selected to build `X`.\n\n target_names_to_select : list of str\n A list of the target names to be selected to build `y`.\n\n Returns\n -------\n X : {ndarray, sparse matrix, dataframe}\n The data matrix.\n\n y : {ndarray, dataframe, series}\n The target.\n\n frame : dataframe or None\n A dataframe containing both `X` and `y`. `None` if\n `output_array_type != \"pandas\"`.\n\n categories : list of str or None\n The names of the features that are categorical. `None` if\n `output_array_type == \"pandas\"`.\n " - }, - { - "id": "sklearn/sklearn.datasets._arff_parser/_post_process_frame", - "name": "_post_process_frame", - "qname": "sklearn.datasets._arff_parser._post_process_frame", - "decorators": [], - "parameters": [ + "type": {} + }, { - "id": "sklearn/sklearn.datasets._arff_parser/_post_process_frame/frame", - "name": "frame", - "qname": "sklearn.datasets._arff_parser._post_process_frame.frame", - "default_value": null, + "id": "sklearn/sklearn.datasets._arff_parser/_liac_arff_parser/col_slice_x", + "name": "col_slice_x", + "qname": "sklearn.datasets._arff_parser._liac_arff_parser.col_slice_x", + "default_value": "None", "assigned_by": "POSITION_OR_NAME", "is_public": false, "docstring": { - "type": "dataframe", - "description": "The dataframe to split into `X` and `y`." 
+ "type": "", + "description": "" }, - "type": { - "kind": "NamedType", - "name": "dataframe" - } + "type": {} }, { - "id": "sklearn/sklearn.datasets._arff_parser/_post_process_frame/feature_names", - "name": "feature_names", - "qname": "sklearn.datasets._arff_parser._post_process_frame.feature_names", - "default_value": null, + "id": "sklearn/sklearn.datasets._arff_parser/_liac_arff_parser/col_slice_y", + "name": "col_slice_y", + "qname": "sklearn.datasets._arff_parser._liac_arff_parser.col_slice_y", + "default_value": "None", "assigned_by": "POSITION_OR_NAME", "is_public": false, "docstring": { - "type": "list of str", - "description": "The list of feature names to populate `X`." + "type": "", + "description": "" }, - "type": { - "kind": "NamedType", - "name": "list of str" - } + "type": {} }, { - "id": "sklearn/sklearn.datasets._arff_parser/_post_process_frame/target_names", - "name": "target_names", - "qname": "sklearn.datasets._arff_parser._post_process_frame.target_names", - "default_value": null, + "id": "sklearn/sklearn.datasets._arff_parser/_liac_arff_parser/shape", + "name": "shape", + "qname": "sklearn.datasets._arff_parser._liac_arff_parser.shape", + "default_value": "None", "assigned_by": "POSITION_OR_NAME", "is_public": false, "docstring": { - "type": "list of str", - "description": "The list of target names to populate `y`." + "type": "", + "description": "" }, - "type": { - "kind": "NamedType", - "name": "list of str" - } + "type": {} } ], "results": [], "is_public": false, - "description": "Post process a dataframe to select the desired columns in `X` and `y`.", - "docstring": "Post process a dataframe to select the desired columns in `X` and `y`.\n\n Parameters\n ----------\n frame : dataframe\n The dataframe to split into `X` and `y`.\n\n feature_names : list of str\n The list of feature names to populate `X`.\n\n target_names : list of str\n The list of target names to populate `y`.\n\n Returns\n -------\n X : dataframe\n The dataframe containing the features.\n\n y : {series, dataframe} or None\n The series or dataframe containing the target.\n " + "description": "", + "docstring": null }, { "id": "sklearn/sklearn.datasets._arff_parser/_sparse_data_to_array", @@ -53543,129 +53213,8 @@ ], "results": [], "is_public": false, - "description": "Obtains several columns from sparse ARFF representation. Additionally,\nthe column indices are re-labelled, given the columns that are not\nincluded. (e.g., when including [1, 2, 3], the columns will be relabelled\nto [0, 1, 2]).", - "docstring": "Obtains several columns from sparse ARFF representation. Additionally,\n the column indices are re-labelled, given the columns that are not\n included. 
(e.g., when including [1, 2, 3], the columns will be relabelled\n to [0, 1, 2]).\n\n Parameters\n ----------\n arff_data : tuple\n A tuple of three lists of equal size; first list indicating the value,\n second the x coordinate and the third the y coordinate.\n\n include_columns : list\n A list of columns to include.\n\n Returns\n -------\n arff_data_new : tuple\n Subset of arff data with only the include columns indicated by the\n include_columns argument.\n " - }, - { - "id": "sklearn/sklearn.datasets._arff_parser/load_arff_from_gzip_file", - "name": "load_arff_from_gzip_file", - "qname": "sklearn.datasets._arff_parser.load_arff_from_gzip_file", - "decorators": [], - "parameters": [ - { - "id": "sklearn/sklearn.datasets._arff_parser/load_arff_from_gzip_file/gzip_file", - "name": "gzip_file", - "qname": "sklearn.datasets._arff_parser.load_arff_from_gzip_file.gzip_file", - "default_value": null, - "assigned_by": "POSITION_OR_NAME", - "is_public": false, - "docstring": { - "type": "GzipFile instance", - "description": "The file compressed to be read." - }, - "type": { - "kind": "NamedType", - "name": "GzipFile instance" - } - }, - { - "id": "sklearn/sklearn.datasets._arff_parser/load_arff_from_gzip_file/parser", - "name": "parser", - "qname": "sklearn.datasets._arff_parser.load_arff_from_gzip_file.parser", - "default_value": null, - "assigned_by": "POSITION_OR_NAME", - "is_public": false, - "docstring": { - "type": "{\"pandas\", \"liac-arff\"}", - "description": "The parser used to parse the ARFF file. \"pandas\" is recommended\nbut only supports loading dense datasets." - }, - "type": { - "kind": "EnumType", - "values": ["pandas", "liac-arff"] - } - }, - { - "id": "sklearn/sklearn.datasets._arff_parser/load_arff_from_gzip_file/output_type", - "name": "output_type", - "qname": "sklearn.datasets._arff_parser.load_arff_from_gzip_file.output_type", - "default_value": null, - "assigned_by": "POSITION_OR_NAME", - "is_public": false, - "docstring": { - "type": "{\"numpy\", \"sparse\", \"pandas\"}", - "description": "The type of the arrays that will be returned. The possibilities ara:\n\n- `\"numpy\"`: both `X` and `y` will be NumPy arrays;\n- `\"sparse\"`: `X` will be sparse matrix and `y` will be a NumPy array;\n- `\"pandas\"`: `X` will be a pandas DataFrame and `y` will be either a\n pandas Series or DataFrame." - }, - "type": { - "kind": "EnumType", - "values": ["pandas", "sparse", "numpy"] - } - }, - { - "id": "sklearn/sklearn.datasets._arff_parser/load_arff_from_gzip_file/openml_columns_info", - "name": "openml_columns_info", - "qname": "sklearn.datasets._arff_parser.load_arff_from_gzip_file.openml_columns_info", - "default_value": null, - "assigned_by": "POSITION_OR_NAME", - "is_public": false, - "docstring": { - "type": "dict", - "description": "The information provided by OpenML regarding the columns of the ARFF\nfile." - }, - "type": { - "kind": "NamedType", - "name": "dict" - } - }, - { - "id": "sklearn/sklearn.datasets._arff_parser/load_arff_from_gzip_file/feature_names_to_select", - "name": "feature_names_to_select", - "qname": "sklearn.datasets._arff_parser.load_arff_from_gzip_file.feature_names_to_select", - "default_value": null, - "assigned_by": "POSITION_OR_NAME", - "is_public": false, - "docstring": { - "type": "list of str", - "description": "A list of the feature names to be selected." 
- }, - "type": { - "kind": "NamedType", - "name": "list of str" - } - }, - { - "id": "sklearn/sklearn.datasets._arff_parser/load_arff_from_gzip_file/target_names_to_select", - "name": "target_names_to_select", - "qname": "sklearn.datasets._arff_parser.load_arff_from_gzip_file.target_names_to_select", - "default_value": null, - "assigned_by": "POSITION_OR_NAME", - "is_public": false, - "docstring": { - "type": "list of str", - "description": "A list of the target names to be selected." - }, - "type": { - "kind": "NamedType", - "name": "list of str" - } - }, - { - "id": "sklearn/sklearn.datasets._arff_parser/load_arff_from_gzip_file/shape", - "name": "shape", - "qname": "sklearn.datasets._arff_parser.load_arff_from_gzip_file.shape", - "default_value": "None", - "assigned_by": "POSITION_OR_NAME", - "is_public": false, - "docstring": { - "type": "", - "description": "" - }, - "type": {} - } - ], - "results": [], - "is_public": false, - "description": "Load a compressed ARFF file using a given parser.", - "docstring": "Load a compressed ARFF file using a given parser.\n\n Parameters\n ----------\n gzip_file : GzipFile instance\n The file compressed to be read.\n\n parser : {\"pandas\", \"liac-arff\"}\n The parser used to parse the ARFF file. \"pandas\" is recommended\n but only supports loading dense datasets.\n\n output_type : {\"numpy\", \"sparse\", \"pandas\"}\n The type of the arrays that will be returned. The possibilities ara:\n\n - `\"numpy\"`: both `X` and `y` will be NumPy arrays;\n - `\"sparse\"`: `X` will be sparse matrix and `y` will be a NumPy array;\n - `\"pandas\"`: `X` will be a pandas DataFrame and `y` will be either a\n pandas Series or DataFrame.\n\n openml_columns_info : dict\n The information provided by OpenML regarding the columns of the ARFF\n file.\n\n feature_names_to_select : list of str\n A list of the feature names to be selected.\n\n target_names_to_select : list of str\n A list of the target names to be selected.\n\n Returns\n -------\n X : {ndarray, sparse matrix, dataframe}\n The data matrix.\n\n y : {ndarray, dataframe, series}\n The target.\n\n frame : dataframe or None\n A dataframe containing both `X` and `y`. `None` if\n `output_array_type != \"pandas\"`.\n\n categories : list of str or None\n The names of the features that are categorical. `None` if\n `output_array_type == \"pandas\"`.\n " + "description": "obtains several columns from sparse arff representation. Additionally, the\ncolumn indices are re-labelled, given the columns that are not included.\n(e.g., when including [1, 2, 3], the columns will be relabelled to\n[0, 1, 2])", + "docstring": "\n obtains several columns from sparse arff representation. 
Additionally, the\n column indices are re-labelled, given the columns that are not included.\n (e.g., when including [1, 2, 3], the columns will be relabelled to\n [0, 1, 2])\n\n Parameters\n ----------\n arff_data : tuple\n A tuple of three lists of equal size; first list indicating the value,\n second the x coordinate and the third the y coordinate.\n\n include_columns : list\n A list of columns to include.\n\n Returns\n -------\n arff_data_new : tuple\n Subset of arff data with only the include columns indicated by the\n include_columns argument.\n " }, { "id": "sklearn/sklearn.datasets._base/_convert_data_dataframe", @@ -54985,7 +54534,7 @@ }, "type": { "kind": "EnumType", - "values": ["smtp", "SA", "http", "SF"] + "values": ["http", "SF", "smtp", "SA"] } }, { @@ -55407,7 +54956,7 @@ }, "type": { "kind": "EnumType", - "values": ["10_folds", "train", "test"] + "values": ["test", "train", "10_folds"] } }, { @@ -55771,13 +55320,10 @@ "assigned_by": "POSITION_OR_NAME", "is_public": false, "docstring": { - "type": "str", - "description": "The URL of the ARFF file on OpenML." + "type": "", + "description": "" }, - "type": { - "kind": "NamedType", - "name": "str" - } + "type": {} }, { "id": "sklearn/sklearn.datasets._openml/_download_data_to_bunch/sparse", @@ -55787,13 +55333,10 @@ "assigned_by": "POSITION_OR_NAME", "is_public": false, "docstring": { - "type": "bool", - "description": "Whether the dataset is expected to use the sparse ARFF format." + "type": "", + "description": "" }, - "type": { - "kind": "NamedType", - "name": "bool" - } + "type": {} }, { "id": "sklearn/sklearn.datasets._openml/_download_data_to_bunch/data_home", @@ -55803,13 +55346,10 @@ "assigned_by": "POSITION_OR_NAME", "is_public": false, "docstring": { - "type": "str", - "description": "The location where to cache the data." + "type": "", + "description": "" }, - "type": { - "kind": "NamedType", - "name": "str" - } + "type": {} }, { "id": "sklearn/sklearn.datasets._openml/_download_data_to_bunch/as_frame", @@ -55819,29 +55359,23 @@ "assigned_by": "NAME_ONLY", "is_public": false, "docstring": { - "type": "bool", - "description": "Whether or not to return the data into a pandas DataFrame." + "type": "", + "description": "" }, - "type": { - "kind": "NamedType", - "name": "bool" - } + "type": {} }, { - "id": "sklearn/sklearn.datasets._openml/_download_data_to_bunch/openml_columns_info", - "name": "openml_columns_info", - "qname": "sklearn.datasets._openml._download_data_to_bunch.openml_columns_info", + "id": "sklearn/sklearn.datasets._openml/_download_data_to_bunch/features_list", + "name": "features_list", + "qname": "sklearn.datasets._openml._download_data_to_bunch.features_list", "default_value": null, "assigned_by": "NAME_ONLY", "is_public": false, "docstring": { - "type": "list of dict", - "description": "The information regarding the columns provided by OpenML for the\nARFF dataset. The information is stored as a list of dictionaries." + "type": "", + "description": "" }, - "type": { - "kind": "NamedType", - "name": "list of dict" - } + "type": {} }, { "id": "sklearn/sklearn.datasets._openml/_download_data_to_bunch/data_columns", @@ -55851,13 +55385,10 @@ "assigned_by": "NAME_ONLY", "is_public": false, "docstring": { - "type": "list of str", - "description": "The list of the features to be selected." 
+ "type": "", + "description": "" }, - "type": { - "kind": "NamedType", - "name": "list of str" - } + "type": {} }, { "id": "sklearn/sklearn.datasets._openml/_download_data_to_bunch/target_columns", @@ -55867,13 +55398,10 @@ "assigned_by": "NAME_ONLY", "is_public": false, "docstring": { - "type": "list of str", - "description": "The list of the target variables to be selected." + "type": "", + "description": "" }, - "type": { - "kind": "NamedType", - "name": "list of str" - } + "type": {} }, { "id": "sklearn/sklearn.datasets._openml/_download_data_to_bunch/shape", @@ -55883,22 +55411,10 @@ "assigned_by": "NAME_ONLY", "is_public": false, "docstring": { - "type": "tuple or None", - "description": "With `parser=\"liac-arff\"`, when using a generator to load the data,\none needs to provide the shape of the data beforehand." + "type": "", + "description": "" }, - "type": { - "kind": "UnionType", - "types": [ - { - "kind": "NamedType", - "name": "tuple" - }, - { - "kind": "NamedType", - "name": "None" - } - ] - } + "type": {} }, { "id": "sklearn/sklearn.datasets._openml/_download_data_to_bunch/md5_checksum", @@ -55908,13 +55424,10 @@ "assigned_by": "NAME_ONLY", "is_public": false, "docstring": { - "type": "str", - "description": "The MD5 checksum provided by OpenML to check the data integrity." + "type": "", + "description": "" }, - "type": { - "kind": "NamedType", - "name": "str" - } + "type": {} }, { "id": "sklearn/sklearn.datasets._openml/_download_data_to_bunch/n_retries", @@ -55924,13 +55437,10 @@ "assigned_by": "NAME_ONLY", "is_public": false, "docstring": { - "type": "int, default=3", - "description": "Number of retries when HTTP errors are encountered. Error with status\ncode 412 won't be retried as they represent OpenML generic errors." + "type": "", + "description": "" }, - "type": { - "kind": "NamedType", - "name": "int" - } + "type": {} }, { "id": "sklearn/sklearn.datasets._openml/_download_data_to_bunch/delay", @@ -55940,35 +55450,16 @@ "assigned_by": "NAME_ONLY", "is_public": false, "docstring": { - "type": "float, default=1.0", - "description": "Number of seconds between retries." - }, - "type": { - "kind": "NamedType", - "name": "float" - } - }, - { - "id": "sklearn/sklearn.datasets._openml/_download_data_to_bunch/parser", - "name": "parser", - "qname": "sklearn.datasets._openml._download_data_to_bunch.parser", - "default_value": null, - "assigned_by": "NAME_ONLY", - "is_public": false, - "docstring": { - "type": "{\"liac-arff\", \"pandas\"}", - "description": "The parser used to parse the ARFF file." + "type": "", + "description": "" }, - "type": { - "kind": "EnumType", - "values": ["pandas", "liac-arff"] - } + "type": {} } ], "results": [], "is_public": false, - "description": "Download ARFF data, load it to a specific container and create to Bunch.\n\nThis function has a mechanism to retry/cache/clean the data.", - "docstring": "Download ARFF data, load it to a specific container and create to Bunch.\n\n This function has a mechanism to retry/cache/clean the data.\n\n Parameters\n ----------\n url : str\n The URL of the ARFF file on OpenML.\n\n sparse : bool\n Whether the dataset is expected to use the sparse ARFF format.\n\n data_home : str\n The location where to cache the data.\n\n as_frame : bool\n Whether or not to return the data into a pandas DataFrame.\n\n openml_columns_info : list of dict\n The information regarding the columns provided by OpenML for the\n ARFF dataset. 
The information is stored as a list of dictionaries.\n\n data_columns : list of str\n The list of the features to be selected.\n\n target_columns : list of str\n The list of the target variables to be selected.\n\n shape : tuple or None\n With `parser=\"liac-arff\"`, when using a generator to load the data,\n one needs to provide the shape of the data beforehand.\n\n md5_checksum : str\n The MD5 checksum provided by OpenML to check the data integrity.\n\n n_retries : int, default=3\n Number of retries when HTTP errors are encountered. Error with status\n code 412 won't be retried as they represent OpenML generic errors.\n\n delay : float, default=1.0\n Number of seconds between retries.\n\n parser : {\"liac-arff\", \"pandas\"}\n The parser used to parse the ARFF file.\n\n Returns\n -------\n data : :class:`~sklearn.utils.Bunch`\n Dictionary-like object, with the following attributes.\n\n X : {ndarray, sparse matrix, dataframe}\n The data matrix.\n y : {ndarray, dataframe, series}\n The target.\n frame : dataframe or None\n A dataframe containing both `X` and `y`. `None` if\n `output_array_type != \"pandas\"`.\n categories : list of str or None\n The names of the features that are categorical. `None` if\n `output_array_type == \"pandas\"`.\n " + "description": "Download OpenML ARFF and convert to Bunch of data", + "docstring": "Download OpenML ARFF and convert to Bunch of data" }, { "id": "sklearn/sklearn.datasets._openml/_get_data_description_by_id", @@ -56379,8 +55870,8 @@ ], "results": [], "is_public": false, - "description": "Loads json data from the openml api.", - "docstring": "\n Loads json data from the openml api.\n\n Parameters\n ----------\n url : str\n The URL to load from. Should be an official OpenML endpoint.\n\n error_message : str or None\n The error message to raise if an acceptable OpenML error is thrown\n (acceptable error is, e.g., data id not found. Other errors, like 404's\n will throw the native error message).\n\n data_home : str or None\n Location to cache the response. None if no cache is required.\n\n n_retries : int, default=3\n Number of retries when HTTP errors are encountered. Error with status\n code 412 won't be retried as they represent OpenML generic errors.\n\n delay : float, default=1.0\n Number of seconds between retries.\n\n Returns\n -------\n json_data : json\n the json result from the OpenML server if the call was successful.\n An exception otherwise.\n " + "description": "Loads json data from the openml api", + "docstring": "\n Loads json data from the openml api\n\n Parameters\n ----------\n url : str\n The URL to load from. Should be an official OpenML endpoint.\n\n error_message : str or None\n The error message to raise if an acceptable OpenML error is thrown\n (acceptable error is, e.g., data id not found. Other errors, like 404's\n will throw the native error message).\n\n data_home : str or None\n Location to cache the response. None if no cache is required.\n\n n_retries : int, default=3\n Number of retries when HTTP errors are encountered. 
Error with status\n code 412 won't be retried as they represent OpenML generic errors.\n\n delay : float, default=1.0\n Number of seconds between retries.\n\n Returns\n -------\n json_data : json\n the json result from the OpenML server if the call was successful.\n An exception otherwise.\n " }, { "id": "sklearn/sklearn.datasets._openml/_get_local_path", @@ -56462,13 +55953,10 @@ "assigned_by": "POSITION_OR_NAME", "is_public": false, "docstring": { - "type": "str", - "description": "The URL of the ARFF file on OpenML." + "type": "", + "description": "" }, - "type": { - "kind": "NamedType", - "name": "str" - } + "type": {} }, { "id": "sklearn/sklearn.datasets._openml/_load_arff_response/data_home", @@ -56478,93 +55966,88 @@ "assigned_by": "POSITION_OR_NAME", "is_public": false, "docstring": { - "type": "str", - "description": "The location where to cache the data." + "type": "", + "description": "" }, - "type": { - "kind": "NamedType", - "name": "str" - } + "type": {} }, { - "id": "sklearn/sklearn.datasets._openml/_load_arff_response/parser", - "name": "parser", - "qname": "sklearn.datasets._openml._load_arff_response.parser", + "id": "sklearn/sklearn.datasets._openml/_load_arff_response/output_arrays_type", + "name": "output_arrays_type", + "qname": "sklearn.datasets._openml._load_arff_response.output_arrays_type", "default_value": null, "assigned_by": "POSITION_OR_NAME", "is_public": false, "docstring": { - "type": "{\"liac-arff\", \"pandas\"}", - "description": "The parser used to parse the ARFF file." + "type": "", + "description": "" }, - "type": { - "kind": "EnumType", - "values": ["pandas", "liac-arff"] - } + "type": {} }, { - "id": "sklearn/sklearn.datasets._openml/_load_arff_response/output_type", - "name": "output_type", - "qname": "sklearn.datasets._openml._load_arff_response.output_type", + "id": "sklearn/sklearn.datasets._openml/_load_arff_response/features_dict", + "name": "features_dict", + "qname": "sklearn.datasets._openml._load_arff_response.features_dict", "default_value": null, "assigned_by": "POSITION_OR_NAME", "is_public": false, "docstring": { - "type": "{\"numpy\", \"pandas\", \"sparse\"}", - "description": "The type of the arrays that will be returned. The possibilities are:\n\n- `\"numpy\"`: both `X` and `y` will be NumPy arrays;\n- `\"sparse\"`: `X` will be sparse matrix and `y` will be a NumPy array;\n- `\"pandas\"`: `X` will be a pandas DataFrame and `y` will be either a\n pandas Series or DataFrame." + "type": "", + "description": "" }, - "type": { - "kind": "EnumType", - "values": ["pandas", "sparse", "numpy"] - } + "type": {} }, { - "id": "sklearn/sklearn.datasets._openml/_load_arff_response/openml_columns_info", - "name": "openml_columns_info", - "qname": "sklearn.datasets._openml._load_arff_response.openml_columns_info", + "id": "sklearn/sklearn.datasets._openml/_load_arff_response/data_columns", + "name": "data_columns", + "qname": "sklearn.datasets._openml._load_arff_response.data_columns", "default_value": null, "assigned_by": "POSITION_OR_NAME", "is_public": false, "docstring": { - "type": "dict", - "description": "The information provided by OpenML regarding the columns of the ARFF\nfile." 
+ "type": "", + "description": "" }, - "type": { - "kind": "NamedType", - "name": "dict" - } + "type": {} }, { - "id": "sklearn/sklearn.datasets._openml/_load_arff_response/feature_names_to_select", - "name": "feature_names_to_select", - "qname": "sklearn.datasets._openml._load_arff_response.feature_names_to_select", + "id": "sklearn/sklearn.datasets._openml/_load_arff_response/target_columns", + "name": "target_columns", + "qname": "sklearn.datasets._openml._load_arff_response.target_columns", "default_value": null, "assigned_by": "POSITION_OR_NAME", "is_public": false, "docstring": { - "type": "list of str", - "description": "The list of the features to be selected." + "type": "", + "description": "" }, - "type": { - "kind": "NamedType", - "name": "list of str" - } + "type": {} }, { - "id": "sklearn/sklearn.datasets._openml/_load_arff_response/target_names_to_select", - "name": "target_names_to_select", - "qname": "sklearn.datasets._openml._load_arff_response.target_names_to_select", + "id": "sklearn/sklearn.datasets._openml/_load_arff_response/col_slice_x", + "name": "col_slice_x", + "qname": "sklearn.datasets._openml._load_arff_response.col_slice_x", "default_value": null, "assigned_by": "POSITION_OR_NAME", "is_public": false, "docstring": { - "type": "list of str", - "description": "The list of the target variables to be selected." + "type": "", + "description": "" }, - "type": { - "kind": "NamedType", - "name": "list of str" - } + "type": {} + }, + { + "id": "sklearn/sklearn.datasets._openml/_load_arff_response/col_slice_y", + "name": "col_slice_y", + "qname": "sklearn.datasets._openml._load_arff_response.col_slice_y", + "default_value": null, + "assigned_by": "POSITION_OR_NAME", + "is_public": false, + "docstring": { + "type": "", + "description": "" + }, + "type": {} }, { "id": "sklearn/sklearn.datasets._openml/_load_arff_response/shape", @@ -56574,22 +56057,10 @@ "assigned_by": "POSITION_OR_NAME", "is_public": false, "docstring": { - "type": "tuple or None", - "description": "With `parser=\"liac-arff\"`, when using a generator to load the data,\none needs to provide the shape of the data beforehand." + "type": "", + "description": "" }, - "type": { - "kind": "UnionType", - "types": [ - { - "kind": "NamedType", - "name": "tuple" - }, - { - "kind": "NamedType", - "name": "None" - } - ] - } + "type": {} }, { "id": "sklearn/sklearn.datasets._openml/_load_arff_response/md5_checksum", @@ -56599,13 +56070,10 @@ "assigned_by": "POSITION_OR_NAME", "is_public": false, "docstring": { - "type": "str", - "description": "The MD5 checksum provided by OpenML to check the data integrity." 
+ "type": "", + "description": "" }, - "type": { - "kind": "NamedType", - "name": "str" - } + "type": {} }, { "id": "sklearn/sklearn.datasets._openml/_load_arff_response/n_retries", @@ -56636,8 +56104,8 @@ ], "results": [], "is_public": false, - "description": "Load the ARFF data associated with the OpenML URL.\n\nIn addition of loading the data, this function will also check the\nintegrity of the downloaded file from OpenML using MD5 checksum.", - "docstring": "Load the ARFF data associated with the OpenML URL.\n\n In addition of loading the data, this function will also check the\n integrity of the downloaded file from OpenML using MD5 checksum.\n\n Parameters\n ----------\n url : str\n The URL of the ARFF file on OpenML.\n\n data_home : str\n The location where to cache the data.\n\n parser : {\"liac-arff\", \"pandas\"}\n The parser used to parse the ARFF file.\n\n output_type : {\"numpy\", \"pandas\", \"sparse\"}\n The type of the arrays that will be returned. The possibilities are:\n\n - `\"numpy\"`: both `X` and `y` will be NumPy arrays;\n - `\"sparse\"`: `X` will be sparse matrix and `y` will be a NumPy array;\n - `\"pandas\"`: `X` will be a pandas DataFrame and `y` will be either a\n pandas Series or DataFrame.\n\n openml_columns_info : dict\n The information provided by OpenML regarding the columns of the ARFF\n file.\n\n feature_names_to_select : list of str\n The list of the features to be selected.\n\n target_names_to_select : list of str\n The list of the target variables to be selected.\n\n shape : tuple or None\n With `parser=\"liac-arff\"`, when using a generator to load the data,\n one needs to provide the shape of the data beforehand.\n\n md5_checksum : str\n The MD5 checksum provided by OpenML to check the data integrity.\n\n Returns\n -------\n X : {ndarray, sparse matrix, dataframe}\n The data matrix.\n\n y : {ndarray, dataframe, series}\n The target.\n\n frame : dataframe or None\n A dataframe containing both `X` and `y`. `None` if\n `output_array_type != \"pandas\"`.\n\n categories : list of str or None\n The names of the features that are categorical. `None` if\n `output_array_type == \"pandas\"`.\n " + "description": "Load arff data with url and parses arff response with parse_arff", + "docstring": "Load arff data with url and parses arff response with parse_arff" }, { "id": "sklearn/sklearn.datasets._openml/_open_openml_url", @@ -57029,7 +56497,7 @@ "is_public": true, "docstring": { "type": "bool or 'auto', default='auto'", - "description": "If True, the data is a pandas DataFrame including columns with\nappropriate dtypes (numeric, string or categorical). The target is\na pandas DataFrame or Series depending on the number of target_columns.\nThe Bunch will contain a ``frame`` attribute with the target and the\ndata. If ``return_X_y`` is True, then ``(data, target)`` will be pandas\nDataFrames or Series as describe above.\n\nIf `as_frame` is 'auto', the data and target will be converted to\nDataFrame or Series as if `as_frame` is set to True, unless the dataset\nis stored in sparse format.\n\nIf `as_frame` is False, the data and target will be NumPy arrays and\nthe `data` will only contain numerical values when `parser=\"liac-arff\"`\nwhere the categories are provided in the attribute `categories` of the\n`Bunch` instance. When `parser=\"pandas\"`, no ordinal encoding is made.\n\n.. versionchanged:: 0.24\n The default value of `as_frame` changed from `False` to `'auto'`\n in 0.24." 
+ "description": "If True, the data is a pandas DataFrame including columns with\nappropriate dtypes (numeric, string or categorical). The target is\na pandas DataFrame or Series depending on the number of target_columns.\nThe Bunch will contain a ``frame`` attribute with the target and the\ndata. If ``return_X_y`` is True, then ``(data, target)`` will be pandas\nDataFrames or Series as describe above.\n\nIf as_frame is 'auto', the data and target will be converted to\nDataFrame or Series as if as_frame is set to True, unless the dataset\nis stored in sparse format.\n\n.. versionchanged:: 0.24\n The default value of `as_frame` changed from `False` to `'auto'`\n in 0.24." }, "type": { "kind": "UnionType", @@ -57076,28 +56544,12 @@ "kind": "NamedType", "name": "float" } - }, - { - "id": "sklearn/sklearn.datasets._openml/fetch_openml/parser", - "name": "parser", - "qname": "sklearn.datasets._openml.fetch_openml.parser", - "default_value": "'warn'", - "assigned_by": "NAME_ONLY", - "is_public": true, - "docstring": { - "type": "{\"auto\", \"pandas\", \"liac-arff\"}, default=\"liac-arff\"", - "description": "Parser used to load the ARFF file. Two parsers are implemented:\n\n- `\"pandas\"`: this is the most efficient parser. However, it requires\n pandas to be installed and can only open dense datasets.\n- `\"liac-arff\"`: this is a pure Python ARFF parser that is much less\n memory- and CPU-efficient. It deals with sparse ARFF dataset.\n\nIf `\"auto\"` (future default), the parser is chosen automatically such that\n`\"liac-arff\"` is selected for sparse ARFF datasets, otherwise\n`\"pandas\"` is selected.\n\n.. versionadded:: 1.2\n.. versionchanged:: 1.4\n The default value of `parser` will change from `\"liac-arff\"` to\n `\"auto\"` in 1.4. You can set `parser=\"auto\"` to silence this\n warning. Therefore, an `ImportError` will be raised from 1.4 if\n the dataset is dense and pandas is not installed." - }, - "type": { - "kind": "EnumType", - "values": ["auto", "pandas", "liac-arff"] - } } ], "results": [], "is_public": true, "description": "Fetch dataset from openml by name or dataset id.\n\nDatasets are uniquely identified by either an integer ID or by a\ncombination of name and version (i.e. there might be multiple\nversions of the 'iris' dataset). Please give either name or data_id\n(not both). In case a name is given, a version can also be\nprovided.\n\nRead more in the :ref:`User Guide `.\n\n.. versionadded:: 0.20\n\n.. note:: EXPERIMENTAL\n\n The API is experimental (particularly the return value structure),\n and might have small backward-incompatible changes without notice\n or warning in future releases.", - "docstring": "Fetch dataset from openml by name or dataset id.\n\n Datasets are uniquely identified by either an integer ID or by a\n combination of name and version (i.e. there might be multiple\n versions of the 'iris' dataset). Please give either name or data_id\n (not both). In case a name is given, a version can also be\n provided.\n\n Read more in the :ref:`User Guide `.\n\n .. versionadded:: 0.20\n\n .. note:: EXPERIMENTAL\n\n The API is experimental (particularly the return value structure),\n and might have small backward-incompatible changes without notice\n or warning in future releases.\n\n Parameters\n ----------\n name : str, default=None\n String identifier of the dataset. Note that OpenML can have multiple\n datasets with the same name.\n\n version : int or 'active', default='active'\n Version of the dataset. 
Can only be provided if also ``name`` is given.\n If 'active' the oldest version that's still active is used. Since\n there may be more than one active version of a dataset, and those\n versions may fundamentally be different from one another, setting an\n exact version is highly recommended.\n\n data_id : int, default=None\n OpenML ID of the dataset. The most specific way of retrieving a\n dataset. If data_id is not given, name (and potential version) are\n used to obtain a dataset.\n\n data_home : str, default=None\n Specify another download and cache folder for the data sets. By default\n all scikit-learn data is stored in '~/scikit_learn_data' subfolders.\n\n target_column : str, list or None, default='default-target'\n Specify the column name in the data to use as target. If\n 'default-target', the standard target column a stored on the server\n is used. If ``None``, all columns are returned as data and the\n target is ``None``. If list (of strings), all columns with these names\n are returned as multi-target (Note: not all scikit-learn classifiers\n can handle all types of multi-output combinations).\n\n cache : bool, default=True\n Whether to cache the downloaded datasets into `data_home`.\n\n return_X_y : bool, default=False\n If True, returns ``(data, target)`` instead of a Bunch object. See\n below for more information about the `data` and `target` objects.\n\n as_frame : bool or 'auto', default='auto'\n If True, the data is a pandas DataFrame including columns with\n appropriate dtypes (numeric, string or categorical). The target is\n a pandas DataFrame or Series depending on the number of target_columns.\n The Bunch will contain a ``frame`` attribute with the target and the\n data. If ``return_X_y`` is True, then ``(data, target)`` will be pandas\n DataFrames or Series as describe above.\n\n If `as_frame` is 'auto', the data and target will be converted to\n DataFrame or Series as if `as_frame` is set to True, unless the dataset\n is stored in sparse format.\n\n If `as_frame` is False, the data and target will be NumPy arrays and\n the `data` will only contain numerical values when `parser=\"liac-arff\"`\n where the categories are provided in the attribute `categories` of the\n `Bunch` instance. When `parser=\"pandas\"`, no ordinal encoding is made.\n\n .. versionchanged:: 0.24\n The default value of `as_frame` changed from `False` to `'auto'`\n in 0.24.\n\n n_retries : int, default=3\n Number of retries when HTTP errors or network timeouts are encountered.\n Error with status code 412 won't be retried as they represent OpenML\n generic errors.\n\n delay : float, default=1.0\n Number of seconds between retries.\n\n parser : {\"auto\", \"pandas\", \"liac-arff\"}, default=\"liac-arff\"\n Parser used to load the ARFF file. Two parsers are implemented:\n\n - `\"pandas\"`: this is the most efficient parser. However, it requires\n pandas to be installed and can only open dense datasets.\n - `\"liac-arff\"`: this is a pure Python ARFF parser that is much less\n memory- and CPU-efficient. It deals with sparse ARFF dataset.\n\n If `\"auto\"` (future default), the parser is chosen automatically such that\n `\"liac-arff\"` is selected for sparse ARFF datasets, otherwise\n `\"pandas\"` is selected.\n\n .. versionadded:: 1.2\n .. versionchanged:: 1.4\n The default value of `parser` will change from `\"liac-arff\"` to\n `\"auto\"` in 1.4. You can set `parser=\"auto\"` to silence this\n warning. 
Therefore, an `ImportError` will be raised from 1.4 if\n the dataset is dense and pandas is not installed.\n\n Returns\n -------\n data : :class:`~sklearn.utils.Bunch`\n Dictionary-like object, with the following attributes.\n\n data : np.array, scipy.sparse.csr_matrix of floats, or pandas DataFrame\n The feature matrix. Categorical features are encoded as ordinals.\n target : np.array, pandas Series or DataFrame\n The regression target or classification labels, if applicable.\n Dtype is float if numeric, and object if categorical. If\n ``as_frame`` is True, ``target`` is a pandas object.\n DESCR : str\n The full description of the dataset.\n feature_names : list\n The names of the dataset columns.\n target_names: list\n The names of the target columns.\n\n .. versionadded:: 0.22\n\n categories : dict or None\n Maps each categorical feature name to a list of values, such\n that the value encoded as i is ith in the list. If ``as_frame``\n is True, this is None.\n details : dict\n More metadata from OpenML.\n frame : pandas DataFrame\n Only present when `as_frame=True`. DataFrame with ``data`` and\n ``target``.\n\n (data, target) : tuple if ``return_X_y`` is True\n\n .. note:: EXPERIMENTAL\n\n This interface is **experimental** and subsequent releases may\n change attributes without notice (although there should only be\n minor changes to ``data`` and ``target``).\n\n Missing values in the 'data' are represented as NaN's. Missing values\n in 'target' are represented as NaN's (numerical target) or None\n (categorical target).\n\n Notes\n -----\n The `\"pandas\"` and `\"liac-arff\"` parsers can lead to different data types\n in the output. The notable differences are the following:\n\n - The `\"liac-arff\"` parser always encodes categorical features as `str` objects.\n To the contrary, the `\"pandas\"` parser instead infers the type while\n reading and numerical categories will be casted into integers whenever\n possible.\n - The `\"liac-arff\"` parser uses float64 to encode numerical features\n tagged as 'REAL' and 'NUMERICAL' in the metadata. The `\"pandas\"`\n parser instead infers if these numerical features corresponds\n to integers and uses panda's Integer extension dtype.\n - In particular, classification datasets with integer categories are\n typically loaded as such `(0, 1, ...)` with the `\"pandas\"` parser while\n `\"liac-arff\"` will force the use of string encoded class labels such as\n `\"0\"`, `\"1\"` and so on.\n\n In addition, when `as_frame=False` is used, the `\"liac-arff\"` parser\n returns ordinally encoded data where the categories are provided in the\n attribute `categories` of the `Bunch` instance. Instead, `\"pandas\"` returns\n a NumPy array were the categories are not encoded.\n " + "docstring": "Fetch dataset from openml by name or dataset id.\n\n Datasets are uniquely identified by either an integer ID or by a\n combination of name and version (i.e. there might be multiple\n versions of the 'iris' dataset). Please give either name or data_id\n (not both). In case a name is given, a version can also be\n provided.\n\n Read more in the :ref:`User Guide `.\n\n .. versionadded:: 0.20\n\n .. note:: EXPERIMENTAL\n\n The API is experimental (particularly the return value structure),\n and might have small backward-incompatible changes without notice\n or warning in future releases.\n\n Parameters\n ----------\n name : str, default=None\n String identifier of the dataset. 
Note that OpenML can have multiple\n datasets with the same name.\n\n version : int or 'active', default='active'\n Version of the dataset. Can only be provided if also ``name`` is given.\n If 'active' the oldest version that's still active is used. Since\n there may be more than one active version of a dataset, and those\n versions may fundamentally be different from one another, setting an\n exact version is highly recommended.\n\n data_id : int, default=None\n OpenML ID of the dataset. The most specific way of retrieving a\n dataset. If data_id is not given, name (and potential version) are\n used to obtain a dataset.\n\n data_home : str, default=None\n Specify another download and cache folder for the data sets. By default\n all scikit-learn data is stored in '~/scikit_learn_data' subfolders.\n\n target_column : str, list or None, default='default-target'\n Specify the column name in the data to use as target. If\n 'default-target', the standard target column a stored on the server\n is used. If ``None``, all columns are returned as data and the\n target is ``None``. If list (of strings), all columns with these names\n are returned as multi-target (Note: not all scikit-learn classifiers\n can handle all types of multi-output combinations).\n\n cache : bool, default=True\n Whether to cache the downloaded datasets into `data_home`.\n\n return_X_y : bool, default=False\n If True, returns ``(data, target)`` instead of a Bunch object. See\n below for more information about the `data` and `target` objects.\n\n as_frame : bool or 'auto', default='auto'\n If True, the data is a pandas DataFrame including columns with\n appropriate dtypes (numeric, string or categorical). The target is\n a pandas DataFrame or Series depending on the number of target_columns.\n The Bunch will contain a ``frame`` attribute with the target and the\n data. If ``return_X_y`` is True, then ``(data, target)`` will be pandas\n DataFrames or Series as describe above.\n\n If as_frame is 'auto', the data and target will be converted to\n DataFrame or Series as if as_frame is set to True, unless the dataset\n is stored in sparse format.\n\n .. versionchanged:: 0.24\n The default value of `as_frame` changed from `False` to `'auto'`\n in 0.24.\n\n n_retries : int, default=3\n Number of retries when HTTP errors or network timeouts are encountered.\n Error with status code 412 won't be retried as they represent OpenML\n generic errors.\n\n delay : float, default=1.0\n Number of seconds between retries.\n\n Returns\n -------\n\n data : :class:`~sklearn.utils.Bunch`\n Dictionary-like object, with the following attributes.\n\n data : np.array, scipy.sparse.csr_matrix of floats, or pandas DataFrame\n The feature matrix. Categorical features are encoded as ordinals.\n target : np.array, pandas Series or DataFrame\n The regression target or classification labels, if applicable.\n Dtype is float if numeric, and object if categorical. If\n ``as_frame`` is True, ``target`` is a pandas object.\n DESCR : str\n The full description of the dataset.\n feature_names : list\n The names of the dataset columns.\n target_names: list\n The names of the target columns.\n\n .. versionadded:: 0.22\n\n categories : dict or None\n Maps each categorical feature name to a list of values, such\n that the value encoded as i is ith in the list. If ``as_frame``\n is True, this is None.\n details : dict\n More metadata from OpenML.\n frame : pandas DataFrame\n Only present when `as_frame=True`. 
DataFrame with ``data`` and\n ``target``.\n\n (data, target) : tuple if ``return_X_y`` is True\n\n .. note:: EXPERIMENTAL\n\n This interface is **experimental** and subsequent releases may\n change attributes without notice (although there should only be\n minor changes to ``data`` and ``target``).\n\n Missing values in the 'data' are represented as NaN's. Missing values\n in 'target' are represented as NaN's (numerical target) or None\n (categorical target).\n " }, { "id": "sklearn/sklearn.datasets._rcv1/_find_permutation", @@ -57197,7 +56649,7 @@ }, "type": { "kind": "EnumType", - "values": ["train", "test", "all"] + "values": ["test", "train", "all"] } }, { @@ -60771,7 +60223,7 @@ }, "type": { "kind": "EnumType", - "values": ["train", "test", "all"] + "values": ["test", "train", "all"] } }, { @@ -60917,7 +60369,7 @@ }, "type": { "kind": "EnumType", - "values": ["train", "test", "all"] + "values": ["test", "train", "all"] } }, { @@ -61367,7 +60819,7 @@ "qname": "sklearn.decomposition._dict_learning.DictionaryLearning.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -61380,7 +60832,7 @@ "qname": "sklearn.decomposition._dict_learning.DictionaryLearning.__init__.n_components", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Number of dictionary elements to extract. If None, then ``n_components``\nis set to ``n_features``." @@ -61396,7 +60848,7 @@ "qname": "sklearn.decomposition._dict_learning.DictionaryLearning.__init__.alpha", "default_value": "1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1.0", "description": "Sparsity controlling parameter." @@ -61412,7 +60864,7 @@ "qname": "sklearn.decomposition._dict_learning.DictionaryLearning.__init__.max_iter", "default_value": "1000", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=1000", "description": "Maximum number of iterations to perform." @@ -61428,7 +60880,7 @@ "qname": "sklearn.decomposition._dict_learning.DictionaryLearning.__init__.tol", "default_value": "1e-08", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-8", "description": "Tolerance for numerical error." @@ -61444,14 +60896,14 @@ "qname": "sklearn.decomposition._dict_learning.DictionaryLearning.__init__.fit_algorithm", "default_value": "'lars'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'lars', 'cd'}, default='lars'", "description": "* `'lars'`: uses the least angle regression method to solve the lasso\n problem (:func:`~sklearn.linear_model.lars_path`);\n* `'cd'`: uses the coordinate descent method to compute the\n Lasso solution (:class:`~sklearn.linear_model.Lasso`). Lars will be\n faster if the estimated components are sparse.\n\n.. versionadded:: 0.17\n *cd* coordinate descent method to improve speed." 
}, "type": { "kind": "EnumType", - "values": ["cd", "lars"] + "values": ["lars", "cd"] } }, { @@ -61460,14 +60912,14 @@ "qname": "sklearn.decomposition._dict_learning.DictionaryLearning.__init__.transform_algorithm", "default_value": "'omp'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}, default='omp'", "description": "Algorithm used to transform the data:\n\n- `'lars'`: uses the least angle regression method\n (:func:`~sklearn.linear_model.lars_path`);\n- `'lasso_lars'`: uses Lars to compute the Lasso solution.\n- `'lasso_cd'`: uses the coordinate descent method to compute the\n Lasso solution (:class:`~sklearn.linear_model.Lasso`). `'lasso_lars'`\n will be faster if the estimated components are sparse.\n- `'omp'`: uses orthogonal matching pursuit to estimate the sparse\n solution.\n- `'threshold'`: squashes to zero all coefficients less than alpha from\n the projection ``dictionary * X'``.\n\n.. versionadded:: 0.17\n *lasso_cd* coordinate descent method to improve speed." }, "type": { "kind": "EnumType", - "values": ["lasso_cd", "lasso_lars", "threshold", "lars", "omp"] + "values": ["lasso_cd", "lasso_lars", "threshold", "omp", "lars"] } }, { @@ -61476,7 +60928,7 @@ "qname": "sklearn.decomposition._dict_learning.DictionaryLearning.__init__.transform_n_nonzero_coefs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Number of nonzero coefficients to target in each column of the\nsolution. This is only used by `algorithm='lars'` and\n`algorithm='omp'`. If `None`, then\n`transform_n_nonzero_coefs=int(n_features / 10)`." @@ -61492,7 +60944,7 @@ "qname": "sklearn.decomposition._dict_learning.DictionaryLearning.__init__.transform_alpha", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=None", "description": "If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the\npenalty applied to the L1 norm.\nIf `algorithm='threshold'`, `alpha` is the absolute value of the\nthreshold below which coefficients will be squashed to zero.\nIf `None`, defaults to `alpha`." @@ -61508,7 +60960,7 @@ "qname": "sklearn.decomposition._dict_learning.DictionaryLearning.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or None, default=None", "description": "Number of parallel jobs to run.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details." @@ -61533,7 +60985,7 @@ "qname": "sklearn.decomposition._dict_learning.DictionaryLearning.__init__.code_init", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray of shape (n_samples, n_components), default=None", "description": "Initial value for the code, for warm restart. Only used if `code_init`\nand `dict_init` are not None." @@ -61549,7 +61001,7 @@ "qname": "sklearn.decomposition._dict_learning.DictionaryLearning.__init__.dict_init", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray of shape (n_components, n_features), default=None", "description": "Initial values for the dictionary, for warm restart. 
Only used if\n`code_init` and `dict_init` are not None." @@ -61565,7 +61017,7 @@ "qname": "sklearn.decomposition._dict_learning.DictionaryLearning.__init__.verbose", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "To control the verbosity of the procedure." @@ -61581,7 +61033,7 @@ "qname": "sklearn.decomposition._dict_learning.DictionaryLearning.__init__.split_sign", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Whether to split the sparse feature vector into the concatenation of\nits negative part and its positive part. This can improve the\nperformance of downstream classifiers." @@ -61597,7 +61049,7 @@ "qname": "sklearn.decomposition._dict_learning.DictionaryLearning.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Used for initializing the dictionary when ``dict_init`` is not\nspecified, randomly shuffling the data when ``shuffle`` is set to\n``True``, and updating the dictionary. Pass an int for reproducible\nresults across multiple function calls.\nSee :term:`Glossary `." @@ -61626,7 +61078,7 @@ "qname": "sklearn.decomposition._dict_learning.DictionaryLearning.__init__.positive_code", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Whether to enforce positivity when finding the code.\n\n.. versionadded:: 0.20" @@ -61642,7 +61094,7 @@ "qname": "sklearn.decomposition._dict_learning.DictionaryLearning.__init__.positive_dict", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Whether to enforce positivity when finding the dictionary.\n\n.. versionadded:: 0.20" @@ -61658,7 +61110,7 @@ "qname": "sklearn.decomposition._dict_learning.DictionaryLearning.__init__.transform_max_iter", "default_value": "1000", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=1000", "description": "Maximum number of iterations to perform if `algorithm='lasso_cd'` or\n`'lasso_lars'`.\n\n.. versionadded:: 0.22" @@ -61670,7 +61122,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -61736,7 +61188,7 @@ "qname": "sklearn.decomposition._dict_learning.DictionaryLearning.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -61749,7 +61201,7 @@ "qname": "sklearn.decomposition._dict_learning.DictionaryLearning.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Training vector, where `n_samples` is the number of samples\nand `n_features` is the number of features." @@ -61765,7 +61217,7 @@ "qname": "sklearn.decomposition._dict_learning.DictionaryLearning.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present for API consistency by convention." 
@@ -61777,7 +61229,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the model from data in X.", "docstring": "Fit the model from data in X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training vector, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n self : object\n Returns the instance itself.\n " }, @@ -61793,7 +61245,7 @@ "qname": "sklearn.decomposition._dict_learning.MiniBatchDictionaryLearning.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -61806,7 +61258,7 @@ "qname": "sklearn.decomposition._dict_learning.MiniBatchDictionaryLearning.__init__.n_components", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Number of dictionary elements to extract." @@ -61822,7 +61274,7 @@ "qname": "sklearn.decomposition._dict_learning.MiniBatchDictionaryLearning.__init__.alpha", "default_value": "1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1", "description": "Sparsity controlling parameter." @@ -61838,7 +61290,7 @@ "qname": "sklearn.decomposition._dict_learning.MiniBatchDictionaryLearning.__init__.n_iter", "default_value": "'deprecated'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=1000", "description": "Total number of iterations over data batches to perform.\n\n.. deprecated:: 1.1\n ``n_iter`` is deprecated in 1.1 and will be removed in 1.3. Use\n ``max_iter`` instead." @@ -61854,7 +61306,7 @@ "qname": "sklearn.decomposition._dict_learning.MiniBatchDictionaryLearning.__init__.max_iter", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Maximum number of iterations over the complete dataset before\nstopping independently of any early stopping criterion heuristics.\nIf ``max_iter`` is not None, ``n_iter`` is ignored.\n\n.. versionadded:: 1.1" @@ -61870,14 +61322,14 @@ "qname": "sklearn.decomposition._dict_learning.MiniBatchDictionaryLearning.__init__.fit_algorithm", "default_value": "'lars'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'lars', 'cd'}, default='lars'", "description": "The algorithm used:\n\n- `'lars'`: uses the least angle regression method to solve the lasso\n problem (`linear_model.lars_path`)\n- `'cd'`: uses the coordinate descent method to compute the\n Lasso solution (`linear_model.Lasso`). Lars will be faster if\n the estimated components are sparse." }, "type": { "kind": "EnumType", - "values": ["cd", "lars"] + "values": ["lars", "cd"] } }, { @@ -61886,7 +61338,7 @@ "qname": "sklearn.decomposition._dict_learning.MiniBatchDictionaryLearning.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Number of parallel jobs to run.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details." 
@@ -61902,7 +61354,7 @@ "qname": "sklearn.decomposition._dict_learning.MiniBatchDictionaryLearning.__init__.batch_size", "default_value": "'warn'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=3", "description": "Number of samples in each mini-batch." @@ -61918,7 +61370,7 @@ "qname": "sklearn.decomposition._dict_learning.MiniBatchDictionaryLearning.__init__.shuffle", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether to shuffle the samples before forming batches." @@ -61934,7 +61386,7 @@ "qname": "sklearn.decomposition._dict_learning.MiniBatchDictionaryLearning.__init__.dict_init", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray of shape (n_components, n_features), default=None", "description": "Initial value of the dictionary for warm restart scenarios." @@ -61950,14 +61402,14 @@ "qname": "sklearn.decomposition._dict_learning.MiniBatchDictionaryLearning.__init__.transform_algorithm", "default_value": "'omp'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}, default='omp'", "description": "Algorithm used to transform the data:\n\n- `'lars'`: uses the least angle regression method\n (`linear_model.lars_path`);\n- `'lasso_lars'`: uses Lars to compute the Lasso solution.\n- `'lasso_cd'`: uses the coordinate descent method to compute the\n Lasso solution (`linear_model.Lasso`). `'lasso_lars'` will be faster\n if the estimated components are sparse.\n- `'omp'`: uses orthogonal matching pursuit to estimate the sparse\n solution.\n- `'threshold'`: squashes to zero all coefficients less than alpha from\n the projection ``dictionary * X'``." }, "type": { "kind": "EnumType", - "values": ["lasso_cd", "lasso_lars", "threshold", "lars", "omp"] + "values": ["lasso_cd", "lasso_lars", "threshold", "omp", "lars"] } }, { @@ -61966,7 +61418,7 @@ "qname": "sklearn.decomposition._dict_learning.MiniBatchDictionaryLearning.__init__.transform_n_nonzero_coefs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Number of nonzero coefficients to target in each column of the\nsolution. This is only used by `algorithm='lars'` and\n`algorithm='omp'`. If `None`, then\n`transform_n_nonzero_coefs=int(n_features / 10)`." @@ -61982,7 +61434,7 @@ "qname": "sklearn.decomposition._dict_learning.MiniBatchDictionaryLearning.__init__.transform_alpha", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=None", "description": "If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the\npenalty applied to the L1 norm.\nIf `algorithm='threshold'`, `alpha` is the absolute value of the\nthreshold below which coefficients will be squashed to zero.\nIf `None`, defaults to `alpha`." @@ -61998,7 +61450,7 @@ "qname": "sklearn.decomposition._dict_learning.MiniBatchDictionaryLearning.__init__.verbose", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool or int, default=False", "description": "To control the verbosity of the procedure." 
@@ -62023,7 +61475,7 @@ "qname": "sklearn.decomposition._dict_learning.MiniBatchDictionaryLearning.__init__.split_sign", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Whether to split the sparse feature vector into the concatenation of\nits negative part and its positive part. This can improve the\nperformance of downstream classifiers." @@ -62039,7 +61491,7 @@ "qname": "sklearn.decomposition._dict_learning.MiniBatchDictionaryLearning.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Used for initializing the dictionary when ``dict_init`` is not\nspecified, randomly shuffling the data when ``shuffle`` is set to\n``True``, and updating the dictionary. Pass an int for reproducible\nresults across multiple function calls.\nSee :term:`Glossary `." @@ -62068,7 +61520,7 @@ "qname": "sklearn.decomposition._dict_learning.MiniBatchDictionaryLearning.__init__.positive_code", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Whether to enforce positivity when finding the code.\n\n.. versionadded:: 0.20" @@ -62084,7 +61536,7 @@ "qname": "sklearn.decomposition._dict_learning.MiniBatchDictionaryLearning.__init__.positive_dict", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Whether to enforce positivity when finding the dictionary.\n\n.. versionadded:: 0.20" @@ -62100,7 +61552,7 @@ "qname": "sklearn.decomposition._dict_learning.MiniBatchDictionaryLearning.__init__.transform_max_iter", "default_value": "1000", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=1000", "description": "Maximum number of iterations to perform if `algorithm='lasso_cd'` or\n`'lasso_lars'`.\n\n.. versionadded:: 0.22" @@ -62116,7 +61568,7 @@ "qname": "sklearn.decomposition._dict_learning.MiniBatchDictionaryLearning.__init__.callback", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "callable, default=None", "description": "A callable that gets invoked at the end of each iteration.\n\n.. versionadded:: 1.1" @@ -62132,7 +61584,7 @@ "qname": "sklearn.decomposition._dict_learning.MiniBatchDictionaryLearning.__init__.tol", "default_value": "0.001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-3", "description": "Control early stopping based on the norm of the differences in the\ndictionary between 2 steps. Used only if `max_iter` is not None.\n\nTo disable early stopping based on changes in the dictionary, set\n`tol` to 0.0.\n\n.. versionadded:: 1.1" @@ -62148,7 +61600,7 @@ "qname": "sklearn.decomposition._dict_learning.MiniBatchDictionaryLearning.__init__.max_no_improvement", "default_value": "10", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=10", "description": "Control early stopping based on the consecutive number of mini batches\nthat does not yield an improvement on the smoothed cost function. 
Used only if\n`max_iter` is not None.\n\nTo disable convergence detection based on cost function, set\n`max_no_improvement` to None.\n\n.. versionadded:: 1.1" @@ -62160,7 +61612,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -62585,7 +62037,7 @@ "qname": "sklearn.decomposition._dict_learning.MiniBatchDictionaryLearning.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -62598,7 +62050,7 @@ "qname": "sklearn.decomposition._dict_learning.MiniBatchDictionaryLearning.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Training vector, where `n_samples` is the number of samples\nand `n_features` is the number of features." @@ -62614,7 +62066,7 @@ "qname": "sklearn.decomposition._dict_learning.MiniBatchDictionaryLearning.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present for API consistency by convention." @@ -62626,7 +62078,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the model from data in X.", "docstring": "Fit the model from data in X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training vector, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n self : object\n Returns the instance itself.\n " }, @@ -62645,7 +62097,7 @@ "qname": "sklearn.decomposition._dict_learning.MiniBatchDictionaryLearning.inner_stats_.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -62654,7 +62106,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -62673,7 +62125,7 @@ "qname": "sklearn.decomposition._dict_learning.MiniBatchDictionaryLearning.iter_offset_.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -62682,7 +62134,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -62698,7 +62150,7 @@ "qname": "sklearn.decomposition._dict_learning.MiniBatchDictionaryLearning.partial_fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -62711,7 +62163,7 @@ "qname": "sklearn.decomposition._dict_learning.MiniBatchDictionaryLearning.partial_fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Training vector, where `n_samples` is the number of samples\nand `n_features` is the number of features." @@ -62727,7 +62179,7 @@ "qname": "sklearn.decomposition._dict_learning.MiniBatchDictionaryLearning.partial_fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present for API consistency by convention." 
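For orientation, the hunks around this point mark the constructor, fit and partial_fit parameters of MiniBatchDictionaryLearning as public. A minimal usage sketch of that API, assuming scikit-learn >= 1.1 (so that max_iter is accepted in place of the deprecated n_iter); the toy data and the n_components/batch_size values are illustrative assumptions, not taken from this file:

    import numpy as np
    from sklearn.decomposition import MiniBatchDictionaryLearning

    # Toy data: 100 samples with 20 features (illustrative values only).
    rng = np.random.RandomState(0)
    X = rng.randn(100, 20)

    # fit_algorithm='lars' and transform_algorithm='omp' mirror the annotated
    # defaults; max_iter replaces the deprecated n_iter as of 1.1.
    dico = MiniBatchDictionaryLearning(
        n_components=15,
        alpha=1,
        batch_size=10,
        max_iter=50,
        fit_algorithm="lars",
        transform_algorithm="omp",
        random_state=0,
    )
    code = dico.fit(X).transform(X)   # sparse codes, shape (100, 15)

    # partial_fit consumes one mini-batch at a time; y is ignored by convention.
    dico.partial_fit(X[:10])
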
@@ -62743,7 +62195,7 @@ "qname": "sklearn.decomposition._dict_learning.MiniBatchDictionaryLearning.partial_fit.iter_offset", "default_value": "'deprecated'", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "The number of iteration on data batches that has been\nperformed before this call to `partial_fit`. This is optional:\nif no number is passed, the memory of the object is\nused.\n\n.. deprecated:: 1.1\n ``iter_offset`` will be removed in 1.3." @@ -62755,7 +62207,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Update the model using the data in X as a mini-batch.", "docstring": "Update the model using the data in X as a mini-batch.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training vector, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n iter_offset : int, default=None\n The number of iteration on data batches that has been\n performed before this call to `partial_fit`. This is optional:\n if no number is passed, the memory of the object is\n used.\n\n .. deprecated:: 1.1\n ``iter_offset`` will be removed in 1.3.\n\n Returns\n -------\n self : object\n Return the instance itself.\n " }, @@ -62774,7 +62226,7 @@ "qname": "sklearn.decomposition._dict_learning.MiniBatchDictionaryLearning.random_state_.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -62783,7 +62235,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -62799,7 +62251,7 @@ "qname": "sklearn.decomposition._dict_learning.SparseCoder.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -62812,7 +62264,7 @@ "qname": "sklearn.decomposition._dict_learning.SparseCoder.__init__.dictionary", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray of shape (n_components, n_features)", "description": "The dictionary atoms used for sparse coding. Lines are assumed to be\nnormalized to unit norm." @@ -62828,14 +62280,14 @@ "qname": "sklearn.decomposition._dict_learning.SparseCoder.__init__.transform_algorithm", "default_value": "'omp'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}, default='omp'", "description": "Algorithm used to transform the data:\n\n- `'lars'`: uses the least angle regression method\n (`linear_model.lars_path`);\n- `'lasso_lars'`: uses Lars to compute the Lasso solution;\n- `'lasso_cd'`: uses the coordinate descent method to compute the\n Lasso solution (linear_model.Lasso). `'lasso_lars'` will be faster if\n the estimated components are sparse;\n- `'omp'`: uses orthogonal matching pursuit to estimate the sparse\n solution;\n- `'threshold'`: squashes to zero all coefficients less than alpha from\n the projection ``dictionary * X'``." 
}, "type": { "kind": "EnumType", - "values": ["lasso_cd", "lasso_lars", "threshold", "lars", "omp"] + "values": ["lasso_cd", "lasso_lars", "threshold", "omp", "lars"] } }, { @@ -62844,7 +62296,7 @@ "qname": "sklearn.decomposition._dict_learning.SparseCoder.__init__.transform_n_nonzero_coefs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Number of nonzero coefficients to target in each column of the\nsolution. This is only used by `algorithm='lars'` and `algorithm='omp'`\nand is overridden by `alpha` in the `omp` case. If `None`, then\n`transform_n_nonzero_coefs=int(n_features / 10)`." @@ -62860,7 +62312,7 @@ "qname": "sklearn.decomposition._dict_learning.SparseCoder.__init__.transform_alpha", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=None", "description": "If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the\npenalty applied to the L1 norm.\nIf `algorithm='threshold'`, `alpha` is the absolute value of the\nthreshold below which coefficients will be squashed to zero.\nIf `algorithm='omp'`, `alpha` is the tolerance parameter: the value of\nthe reconstruction error targeted. In this case, it overrides\n`n_nonzero_coefs`.\nIf `None`, default to 1." @@ -62876,7 +62328,7 @@ "qname": "sklearn.decomposition._dict_learning.SparseCoder.__init__.split_sign", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Whether to split the sparse feature vector into the concatenation of\nits negative part and its positive part. This can improve the\nperformance of downstream classifiers." @@ -62892,7 +62344,7 @@ "qname": "sklearn.decomposition._dict_learning.SparseCoder.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Number of parallel jobs to run.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details." @@ -62908,7 +62360,7 @@ "qname": "sklearn.decomposition._dict_learning.SparseCoder.__init__.positive_code", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Whether to enforce positivity when finding the code.\n\n.. versionadded:: 0.20" @@ -62924,7 +62376,7 @@ "qname": "sklearn.decomposition._dict_learning.SparseCoder.__init__.transform_max_iter", "default_value": "1000", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=1000", "description": "Maximum number of iterations to perform if `algorithm='lasso_cd'` or\n`lasso_lars`.\n\n.. 
versionadded:: 0.22" @@ -62936,7 +62388,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -63002,7 +62454,7 @@ "qname": "sklearn.decomposition._dict_learning.SparseCoder.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -63015,7 +62467,7 @@ "qname": "sklearn.decomposition._dict_learning.SparseCoder.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present for API consistency by convention." @@ -63031,7 +62483,7 @@ "qname": "sklearn.decomposition._dict_learning.SparseCoder.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present for API consistency by convention." @@ -63043,7 +62495,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Do nothing and return the estimator unchanged.\n\nThis method is just there to implement the usual API and hence\nwork in pipelines.", "docstring": "Do nothing and return the estimator unchanged.\n\n This method is just there to implement the usual API and hence\n work in pipelines.\n\n Parameters\n ----------\n X : Ignored\n Not used, present for API consistency by convention.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n self : object\n Returns the instance itself.\n " }, @@ -63059,7 +62511,7 @@ "qname": "sklearn.decomposition._dict_learning.SparseCoder.n_components_.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -63068,7 +62520,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Number of atoms.", "docstring": "Number of atoms." }, @@ -63084,7 +62536,7 @@ "qname": "sklearn.decomposition._dict_learning.SparseCoder.n_features_in_.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -63093,7 +62545,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Number of features seen during `fit`.", "docstring": "Number of features seen during `fit`." }, @@ -63109,7 +62561,7 @@ "qname": "sklearn.decomposition._dict_learning.SparseCoder.transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -63122,7 +62574,7 @@ "qname": "sklearn.decomposition._dict_learning.SparseCoder.transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray of shape (n_samples, n_features)", "description": "Training vector, where `n_samples` is the number of samples\nand `n_features` is the number of features." @@ -63138,7 +62590,7 @@ "qname": "sklearn.decomposition._dict_learning.SparseCoder.transform.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present for API consistency by convention." 
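The SparseCoder entries above describe a transform-only estimator built around a fixed dictionary. A small sketch of how those parameters fit together; the dictionary and data below are made-up toy values, and transform_alpha=0.1 is an arbitrary illustrative choice:

    import numpy as np
    from sklearn.decomposition import SparseCoder

    # A fixed dictionary of 5 atoms over 8 features, normalized to unit norm
    # as the `dictionary` parameter expects.
    rng = np.random.RandomState(0)
    D = rng.randn(5, 8)
    D /= np.linalg.norm(D, axis=1, keepdims=True)

    X = rng.randn(3, 8)

    # transform_algorithm and transform_alpha correspond to the annotated
    # parameters; 'lasso_lars' applies an L1 penalty of transform_alpha.
    coder = SparseCoder(
        dictionary=D,
        transform_algorithm="lasso_lars",
        transform_alpha=0.1,
    )
    codes = coder.transform(X)   # shape (3, 5)

    # fit() is a no-op kept only for pipeline compatibility, as documented above.
    coder.fit(X)
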
@@ -63150,7 +62602,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Encode the data as a sparse combination of the dictionary atoms.\n\nCoding method is determined by the object parameter\n`transform_algorithm`.", "docstring": "Encode the data as a sparse combination of the dictionary atoms.\n\n Coding method is determined by the object parameter\n `transform_algorithm`.\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Training vector, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n X_new : ndarray of shape (n_samples, n_components)\n Transformed data.\n " }, @@ -63556,7 +63008,7 @@ }, "type": { "kind": "EnumType", - "values": ["lasso_cd", "lasso_lars", "threshold", "lars", "omp"] + "values": ["lasso_cd", "lasso_lars", "threshold", "omp", "lars"] } }, { @@ -63932,7 +63384,7 @@ }, "type": { "kind": "EnumType", - "values": ["cd", "lars"] + "values": ["lars", "cd"] } }, { @@ -64334,7 +63786,7 @@ }, "type": { "kind": "EnumType", - "values": ["cd", "lars"] + "values": ["lars", "cd"] } }, { @@ -64599,7 +64051,7 @@ }, "type": { "kind": "EnumType", - "values": ["lasso_cd", "lasso_lars", "threshold", "lars", "omp"] + "values": ["lasso_cd", "lasso_lars", "threshold", "omp", "lars"] } }, { @@ -64764,7 +64216,7 @@ "qname": "sklearn.decomposition._factor_analysis.FactorAnalysis.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -64777,7 +64229,7 @@ "qname": "sklearn.decomposition._factor_analysis.FactorAnalysis.__init__.n_components", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Dimensionality of latent space, the number of components\nof ``X`` that are obtained after ``transform``.\nIf None, n_components is set to the number of features." @@ -64793,7 +64245,7 @@ "qname": "sklearn.decomposition._factor_analysis.FactorAnalysis.__init__.tol", "default_value": "0.01", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-2", "description": "Stopping tolerance for log-likelihood increase." @@ -64809,7 +64261,7 @@ "qname": "sklearn.decomposition._factor_analysis.FactorAnalysis.__init__.copy", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether to make a copy of X. If ``False``, the input X gets overwritten\nduring fitting." @@ -64825,7 +64277,7 @@ "qname": "sklearn.decomposition._factor_analysis.FactorAnalysis.__init__.max_iter", "default_value": "1000", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=1000", "description": "Maximum number of iterations." @@ -64841,7 +64293,7 @@ "qname": "sklearn.decomposition._factor_analysis.FactorAnalysis.__init__.noise_variance_init", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray of shape (n_features,), default=None", "description": "The initial guess of the noise variance for each feature.\nIf None, it defaults to np.ones(n_features)." 
@@ -64857,14 +64309,14 @@ "qname": "sklearn.decomposition._factor_analysis.FactorAnalysis.__init__.svd_method", "default_value": "'randomized'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'lapack', 'randomized'}, default='randomized'", "description": "Which SVD method to use. If 'lapack' use standard SVD from\nscipy.linalg, if 'randomized' use fast ``randomized_svd`` function.\nDefaults to 'randomized'. For most applications 'randomized' will\nbe sufficiently precise while providing significant speed gains.\nAccuracy can also be improved by setting higher values for\n`iterated_power`. If this is not sufficient, for maximum precision\nyou should choose 'lapack'." }, "type": { "kind": "EnumType", - "values": ["randomized", "lapack"] + "values": ["lapack", "randomized"] } }, { @@ -64873,7 +64325,7 @@ "qname": "sklearn.decomposition._factor_analysis.FactorAnalysis.__init__.iterated_power", "default_value": "3", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=3", "description": "Number of iterations for the power method. 3 by default. Only used\nif ``svd_method`` equals 'randomized'." @@ -64889,7 +64341,7 @@ "qname": "sklearn.decomposition._factor_analysis.FactorAnalysis.__init__.rotation", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'varimax', 'quartimax'}, default=None", "description": "If not None, apply the indicated rotation. Currently, varimax and\nquartimax are implemented. See\n`\"The varimax criterion for analytic rotation in factor analysis\"\n`_\nH. F. Kaiser, 1958.\n\n.. versionadded:: 0.24" @@ -64905,7 +64357,7 @@ "qname": "sklearn.decomposition._factor_analysis.FactorAnalysis.__init__.random_state", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or RandomState instance, default=0", "description": "Only used when ``svd_method`` equals 'randomized'. Pass an int for\nreproducible results across multiple function calls.\nSee :term:`Glossary `." @@ -64926,7 +64378,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -65031,7 +64483,7 @@ "qname": "sklearn.decomposition._factor_analysis.FactorAnalysis.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -65044,7 +64496,7 @@ "qname": "sklearn.decomposition._factor_analysis.FactorAnalysis.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Training data." @@ -65060,7 +64512,7 @@ "qname": "sklearn.decomposition._factor_analysis.FactorAnalysis.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Ignored parameter." 
@@ -65072,7 +64524,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the FactorAnalysis model to X using SVD based approach.", "docstring": "Fit the FactorAnalysis model to X using SVD based approach.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data.\n\n y : Ignored\n Ignored parameter.\n\n Returns\n -------\n self : object\n FactorAnalysis class instance.\n " }, @@ -65088,7 +64540,7 @@ "qname": "sklearn.decomposition._factor_analysis.FactorAnalysis.get_covariance.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -65097,7 +64549,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Compute data covariance with the FactorAnalysis model.\n\n``cov = components_.T * components_ + diag(noise_variance)``", "docstring": "Compute data covariance with the FactorAnalysis model.\n\n ``cov = components_.T * components_ + diag(noise_variance)``\n\n Returns\n -------\n cov : ndarray of shape (n_features, n_features)\n Estimated covariance of data.\n " }, @@ -65113,7 +64565,7 @@ "qname": "sklearn.decomposition._factor_analysis.FactorAnalysis.get_precision.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -65122,7 +64574,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Compute data precision matrix with the FactorAnalysis model.", "docstring": "Compute data precision matrix with the FactorAnalysis model.\n\n Returns\n -------\n precision : ndarray of shape (n_features, n_features)\n Estimated precision of data.\n " }, @@ -65138,7 +64590,7 @@ "qname": "sklearn.decomposition._factor_analysis.FactorAnalysis.score.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -65151,7 +64603,7 @@ "qname": "sklearn.decomposition._factor_analysis.FactorAnalysis.score.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray of shape (n_samples, n_features)", "description": "The data." @@ -65167,7 +64619,7 @@ "qname": "sklearn.decomposition._factor_analysis.FactorAnalysis.score.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Ignored parameter." 
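As a companion to the FactorAnalysis entries above, a hedged sketch of the documented fit/transform/get_covariance/score surface; the toy data, n_components=2 and the varimax rotation are assumptions chosen for illustration:

    import numpy as np
    from sklearn.decomposition import FactorAnalysis

    rng = np.random.RandomState(0)
    X = rng.randn(200, 6)

    # svd_method='randomized' and rotation='varimax' are among the annotated
    # options; random_state only matters for the randomized solver.
    fa = FactorAnalysis(
        n_components=2,
        svd_method="randomized",
        rotation="varimax",
        random_state=0,
    )
    Z = fa.fit_transform(X)    # latent variables, shape (200, 2)

    cov = fa.get_covariance()  # components_.T @ components_ + diag(noise_variance_)
    ll = fa.score(X)           # average log-likelihood of the samples
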
@@ -65179,7 +64631,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Compute the average log-likelihood of the samples.", "docstring": "Compute the average log-likelihood of the samples.\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n The data.\n\n y : Ignored\n Ignored parameter.\n\n Returns\n -------\n ll : float\n Average log-likelihood of the samples under the current model.\n " }, @@ -65195,7 +64647,7 @@ "qname": "sklearn.decomposition._factor_analysis.FactorAnalysis.score_samples.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -65208,7 +64660,7 @@ "qname": "sklearn.decomposition._factor_analysis.FactorAnalysis.score_samples.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray of shape (n_samples, n_features)", "description": "The data." @@ -65220,7 +64672,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Compute the log-likelihood of each sample.", "docstring": "Compute the log-likelihood of each sample.\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n The data.\n\n Returns\n -------\n ll : ndarray of shape (n_samples,)\n Log-likelihood of each sample under the current model.\n " }, @@ -65236,7 +64688,7 @@ "qname": "sklearn.decomposition._factor_analysis.FactorAnalysis.transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -65249,7 +64701,7 @@ "qname": "sklearn.decomposition._factor_analysis.FactorAnalysis.transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Training data." @@ -65261,7 +64713,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Apply dimensionality reduction to X using the model.\n\nCompute the expected mean of the latent variables.\nSee Barber, 21.2.33 (or Bishop, 12.66).", "docstring": "Apply dimensionality reduction to X using the model.\n\n Compute the expected mean of the latent variables.\n See Barber, 21.2.33 (or Bishop, 12.66).\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data.\n\n Returns\n -------\n X_new : ndarray of shape (n_samples, n_components)\n The latent variables of X.\n " }, @@ -65341,7 +64793,7 @@ "qname": "sklearn.decomposition._fastica.FastICA.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -65354,7 +64806,7 @@ "qname": "sklearn.decomposition._fastica.FastICA.__init__.n_components", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Number of components to use. If None is passed, all are used." @@ -65370,14 +64822,14 @@ "qname": "sklearn.decomposition._fastica.FastICA.__init__.algorithm", "default_value": "'parallel'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'parallel', 'deflation'}, default='parallel'", - "description": "Specify which algorithm to use for FastICA." + "description": "Apply parallel or deflational algorithm for FastICA." 
}, "type": { "kind": "EnumType", - "values": ["parallel", "deflation"] + "values": ["deflation", "parallel"] } }, { @@ -65386,10 +64838,10 @@ "qname": "sklearn.decomposition._fastica.FastICA.__init__.whiten", "default_value": "'warn'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str or bool, default=\"warn\"", - "description": "Specify the whitening strategy to use.\n\n- If 'arbitrary-variance' (default), a whitening with variance\n arbitrary is used.\n- If 'unit-variance', the whitening matrix is rescaled to ensure that\n each recovered source has unit variance.\n- If False, the data is already considered to be whitened, and no\n whitening is performed.\n\n.. deprecated:: 1.1\n Starting in v1.3, `whiten='unit-variance'` will be used by default.\n `whiten=True` is deprecated from 1.1 and will raise ValueError in 1.3.\n Use `whiten=arbitrary-variance` instead." + "description": "Specify the whitening strategy to use.\nIf 'arbitrary-variance' (default), a whitening with variance arbitrary is used.\nIf 'unit-variance', the whitening matrix is rescaled to ensure that each\nrecovered source has unit variance.\nIf False, the data is already considered to be whitened, and no\nwhitening is performed.\n\n.. deprecated:: 1.1\n From version 1.3 whiten='unit-variance' will be used by default.\n `whiten=True` is deprecated from 1.1 and will raise ValueError in 1.3.\n Use `whiten=arbitrary-variance` instead." }, "type": { "kind": "UnionType", @@ -65411,17 +64863,17 @@ "qname": "sklearn.decomposition._fastica.FastICA.__init__.fun", "default_value": "'logcosh'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'logcosh', 'exp', 'cube'} or callable, default='logcosh'", - "description": "The functional form of the G function used in the\napproximation to neg-entropy. Could be either 'logcosh', 'exp',\nor 'cube'.\nYou can also provide your own function. It should return a tuple\ncontaining the value of the function, and of its derivative, in the\npoint. The derivative should be averaged along its last dimension.\nExample::\n\n def my_g(x):\n return x ** 3, (3 * x ** 2).mean(axis=-1)" + "description": "The functional form of the G function used in the\napproximation to neg-entropy. Could be either 'logcosh', 'exp',\nor 'cube'.\nYou can also provide your own function. It should return a tuple\ncontaining the value of the function, and of its derivative, in the\npoint. Example::\n\n def my_g(x):\n return x ** 3, (3 * x ** 2).mean(axis=-1)" }, "type": { "kind": "UnionType", "types": [ { "kind": "EnumType", - "values": ["exp", "logcosh", "cube"] + "values": ["logcosh", "cube", "exp"] }, { "kind": "NamedType", @@ -65436,10 +64888,10 @@ "qname": "sklearn.decomposition._fastica.FastICA.__init__.fun_args", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "dict, default=None", - "description": "Arguments to send to the functional form.\nIf empty or None and if fun='logcosh', fun_args will take value\n{'alpha' : 1.0}." + "description": "Arguments to send to the functional form.\nIf empty and if fun='logcosh', fun_args will take value\n{'alpha' : 1.0}." 
}, "type": { "kind": "NamedType", @@ -65452,7 +64904,7 @@ "qname": "sklearn.decomposition._fastica.FastICA.__init__.max_iter", "default_value": "200", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=200", "description": "Maximum number of iterations during fit." @@ -65468,10 +64920,10 @@ "qname": "sklearn.decomposition._fastica.FastICA.__init__.tol", "default_value": "0.0001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-4", - "description": "A positive scalar giving the tolerance at which the\nun-mixing matrix is considered to have converged." + "description": "Tolerance on update at each iteration." }, "type": { "kind": "NamedType", @@ -65484,10 +64936,10 @@ "qname": "sklearn.decomposition._fastica.FastICA.__init__.w_init", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray of shape (n_components, n_components), default=None", - "description": "Initial un-mixing array. If `w_init=None`, then an array of values\ndrawn from a normal distribution is used." + "description": "The mixing matrix to be used to initialize the algorithm." }, "type": { "kind": "NamedType", @@ -65500,7 +64952,7 @@ "qname": "sklearn.decomposition._fastica.FastICA.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Used to initialize ``w_init`` when not specified, with a\nnormal distribution. Pass an int, for reproducible results\nacross multiple function calls.\nSee :term:`Glossary `." @@ -65525,7 +64977,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -65648,7 +65100,7 @@ "qname": "sklearn.decomposition._fastica.FastICA.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -65661,7 +65113,7 @@ "qname": "sklearn.decomposition._fastica.FastICA.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Training data, where `n_samples` is the number of samples\nand `n_features` is the number of features." @@ -65677,7 +65129,7 @@ "qname": "sklearn.decomposition._fastica.FastICA.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present for API consistency by convention." 
@@ -65689,7 +65141,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the model to X.", "docstring": "Fit the model to X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n self : object\n Returns the instance itself.\n " }, @@ -65705,7 +65157,7 @@ "qname": "sklearn.decomposition._fastica.FastICA.fit_transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -65718,7 +65170,7 @@ "qname": "sklearn.decomposition._fastica.FastICA.fit_transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Training data, where `n_samples` is the number of samples\nand `n_features` is the number of features." @@ -65734,7 +65186,7 @@ "qname": "sklearn.decomposition._fastica.FastICA.fit_transform.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present for API consistency by convention." @@ -65746,7 +65198,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the model and recover the sources from X.", "docstring": "Fit the model and recover the sources from X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n X_new : ndarray of shape (n_samples, n_components)\n Estimated sources obtained by transforming the data with the\n estimated unmixing matrix.\n " }, @@ -65762,7 +65214,7 @@ "qname": "sklearn.decomposition._fastica.FastICA.inverse_transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -65775,7 +65227,7 @@ "qname": "sklearn.decomposition._fastica.FastICA.inverse_transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_components)", "description": "Sources, where `n_samples` is the number of samples\nand `n_components` is the number of components." @@ -65791,7 +65243,7 @@ "qname": "sklearn.decomposition._fastica.FastICA.inverse_transform.copy", "default_value": "True", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "If False, data passed to fit are overwritten. Defaults to True." @@ -65803,7 +65255,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Transform the sources back to the mixed data (apply mixing matrix).", "docstring": "Transform the sources back to the mixed data (apply mixing matrix).\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_components)\n Sources, where `n_samples` is the number of samples\n and `n_components` is the number of components.\n copy : bool, default=True\n If False, data passed to fit are overwritten. 
Defaults to True.\n\n Returns\n -------\n X_new : ndarray of shape (n_samples, n_features)\n Reconstructed data obtained with the mixing matrix.\n " }, @@ -65819,7 +65271,7 @@ "qname": "sklearn.decomposition._fastica.FastICA.transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -65832,7 +65284,7 @@ "qname": "sklearn.decomposition._fastica.FastICA.transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Data to transform, where `n_samples` is the number of samples\nand `n_features` is the number of features." @@ -65848,7 +65300,7 @@ "qname": "sklearn.decomposition._fastica.FastICA.transform.copy", "default_value": "True", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "If False, data passed to fit can be overwritten. Defaults to True." @@ -65860,7 +65312,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Recover the sources from X (apply the unmixing matrix).", "docstring": "Recover the sources from X (apply the unmixing matrix).\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Data to transform, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n copy : bool, default=True\n If False, data passed to fit can be overwritten. Defaults to True.\n\n Returns\n -------\n X_new : ndarray of shape (n_samples, n_components)\n Estimated sources obtained by transforming the data with the\n estimated unmixing matrix.\n " }, @@ -66274,7 +65726,7 @@ "is_public": true, "docstring": { "type": "int, default=None", - "description": "Number of components to use. If None is passed, all are used." + "description": "Number of components to extract. If None no dimension reduction\nis performed." }, "type": { "kind": "NamedType", @@ -66290,11 +65742,11 @@ "is_public": true, "docstring": { "type": "{'parallel', 'deflation'}, default='parallel'", - "description": "Specify which algorithm to use for FastICA." + "description": "Apply a parallel or deflational FASTICA algorithm." }, "type": { "kind": "EnumType", - "values": ["parallel", "deflation"] + "values": ["deflation", "parallel"] } }, { @@ -66306,7 +65758,7 @@ "is_public": true, "docstring": { "type": "str or bool, default=\"warn\"", - "description": "Specify the whitening strategy to use.\n\n- If 'arbitrary-variance' (default), a whitening with variance\n arbitrary is used.\n- If 'unit-variance', the whitening matrix is rescaled to ensure that\n each recovered source has unit variance.\n- If False, the data is already considered to be whitened, and no\n whitening is performed.\n\n.. deprecated:: 1.1\n Starting in v1.3, `whiten='unit-variance'` will be used by default.\n `whiten=True` is deprecated from 1.1 and will raise ValueError in 1.3.\n Use `whiten=arbitrary-variance` instead." + "description": "Specify the whitening strategy to use.\nIf 'arbitrary-variance' (default), a whitening with variance arbitrary is used.\nIf 'unit-variance', the whitening matrix is rescaled to ensure that each\nrecovered source has unit variance.\nIf False, the data is already considered to be whitened, and no\nwhitening is performed.\n\n.. 
deprecated:: 1.1\n From version 1.3, `whiten='unit-variance'` will be used by default.\n `whiten=True` is deprecated from 1.1 and will raise ValueError in 1.3.\n Use `whiten=arbitrary-variance` instead." }, "type": { "kind": "UnionType", @@ -66331,14 +65783,14 @@ "is_public": true, "docstring": { "type": "{'logcosh', 'exp', 'cube'} or callable, default='logcosh'", - "description": "The functional form of the G function used in the\napproximation to neg-entropy. Could be either 'logcosh', 'exp',\nor 'cube'.\nYou can also provide your own function. It should return a tuple\ncontaining the value of the function, and of its derivative, in the\npoint. The derivative should be averaged along its last dimension.\nExample::\n\n def my_g(x):\n return x ** 3, (3 * x ** 2).mean(axis=-1)" + "description": "The functional form of the G function used in the\napproximation to neg-entropy. Could be either 'logcosh', 'exp',\nor 'cube'.\nYou can also provide your own function. It should return a tuple\ncontaining the value of the function, and of its derivative, in the\npoint. The derivative should be averaged along its last dimension.\nExample:\n\ndef my_g(x):\n return x ** 3, np.mean(3 * x ** 2, axis=-1)" }, "type": { "kind": "UnionType", "types": [ { "kind": "EnumType", - "values": ["exp", "logcosh", "cube"] + "values": ["logcosh", "cube", "exp"] }, { "kind": "NamedType", @@ -66387,7 +65839,7 @@ "assigned_by": "NAME_ONLY", "is_public": true, "docstring": { - "type": "float, default=1e-4", + "type": "float, default=1e-04", "description": "A positive scalar giving the tolerance at which the\nun-mixing matrix is considered to have converged." }, "type": { @@ -66404,7 +65856,7 @@ "is_public": true, "docstring": { "type": "ndarray of shape (n_components, n_components), default=None", - "description": "Initial un-mixing array. If `w_init=None`, then an array of values\ndrawn from a normal distribution is used." + "description": "Initial un-mixing array of dimension (n.comp,n.comp).\nIf None (default) then an array of normal r.v.'s is used." }, "type": { "kind": "NamedType", @@ -66492,7 +65944,7 @@ "results": [], "is_public": true, "description": "Perform Fast Independent Component Analysis.\n\nThe implementation is based on [1]_.\n\nRead more in the :ref:`User Guide `.", - "docstring": "Perform Fast Independent Component Analysis.\n\n The implementation is based on [1]_.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training vector, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n n_components : int, default=None\n Number of components to use. If None is passed, all are used.\n\n algorithm : {'parallel', 'deflation'}, default='parallel'\n Specify which algorithm to use for FastICA.\n\n whiten : str or bool, default=\"warn\"\n Specify the whitening strategy to use.\n\n - If 'arbitrary-variance' (default), a whitening with variance\n arbitrary is used.\n - If 'unit-variance', the whitening matrix is rescaled to ensure that\n each recovered source has unit variance.\n - If False, the data is already considered to be whitened, and no\n whitening is performed.\n\n .. 
deprecated:: 1.1\n Starting in v1.3, `whiten='unit-variance'` will be used by default.\n `whiten=True` is deprecated from 1.1 and will raise ValueError in 1.3.\n Use `whiten=arbitrary-variance` instead.\n\n fun : {'logcosh', 'exp', 'cube'} or callable, default='logcosh'\n The functional form of the G function used in the\n approximation to neg-entropy. Could be either 'logcosh', 'exp',\n or 'cube'.\n You can also provide your own function. It should return a tuple\n containing the value of the function, and of its derivative, in the\n point. The derivative should be averaged along its last dimension.\n Example::\n\n def my_g(x):\n return x ** 3, (3 * x ** 2).mean(axis=-1)\n\n fun_args : dict, default=None\n Arguments to send to the functional form.\n If empty or None and if fun='logcosh', fun_args will take value\n {'alpha' : 1.0}.\n\n max_iter : int, default=200\n Maximum number of iterations to perform.\n\n tol : float, default=1e-4\n A positive scalar giving the tolerance at which the\n un-mixing matrix is considered to have converged.\n\n w_init : ndarray of shape (n_components, n_components), default=None\n Initial un-mixing array. If `w_init=None`, then an array of values\n drawn from a normal distribution is used.\n\n random_state : int, RandomState instance or None, default=None\n Used to initialize ``w_init`` when not specified, with a\n normal distribution. Pass an int, for reproducible results\n across multiple function calls.\n See :term:`Glossary `.\n\n return_X_mean : bool, default=False\n If True, X_mean is returned too.\n\n compute_sources : bool, default=True\n If False, sources are not computed, but only the rotation matrix.\n This can save memory when working with big data. Defaults to True.\n\n return_n_iter : bool, default=False\n Whether or not to return the number of iterations.\n\n Returns\n -------\n K : ndarray of shape (n_components, n_features) or None\n If whiten is 'True', K is the pre-whitening matrix that projects data\n onto the first n_components principal components. If whiten is 'False',\n K is 'None'.\n\n W : ndarray of shape (n_components, n_components)\n The square matrix that unmixes the data after whitening.\n The mixing matrix is the pseudo-inverse of matrix ``W K``\n if K is not None, else it is the inverse of W.\n\n S : ndarray of shape (n_samples, n_components) or None\n Estimated source matrix.\n\n X_mean : ndarray of shape (n_features,)\n The mean over features. Returned only if return_X_mean is True.\n\n n_iter : int\n If the algorithm is \"deflation\", n_iter is the\n maximum number of iterations run across all components. Else\n they are just the number of iterations taken to converge. This is\n returned only when return_n_iter is set to `True`.\n\n Notes\n -----\n The data matrix X is considered to be a linear combination of\n non-Gaussian (independent) components i.e. X = AS where columns of S\n contain the independent components and A is a linear mixing\n matrix. In short ICA attempts to `un-mix' the data by estimating an\n un-mixing matrix W where ``S = W K X.``\n While FastICA was proposed to estimate as many sources\n as features, it is possible to estimate less by setting\n n_components < n_features. It this case K is not a square matrix\n and the estimated A is the pseudo-inverse of ``W K``.\n\n This implementation was originally made for data of shape\n [n_features, n_samples]. Now the input is transposed\n before the algorithm is applied. This makes it slightly\n faster for Fortran-ordered input.\n\n References\n ----------\n .. 
[1] A. Hyvarinen and E. Oja, \"Fast Independent Component Analysis\",\n Algorithms and Applications, Neural Networks, 13(4-5), 2000,\n pp. 411-430.\n " + "docstring": "Perform Fast Independent Component Analysis.\n\n The implementation is based on [1]_.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training vector, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n n_components : int, default=None\n Number of components to extract. If None no dimension reduction\n is performed.\n\n algorithm : {'parallel', 'deflation'}, default='parallel'\n Apply a parallel or deflational FASTICA algorithm.\n\n whiten : str or bool, default=\"warn\"\n Specify the whitening strategy to use.\n If 'arbitrary-variance' (default), a whitening with variance arbitrary is used.\n If 'unit-variance', the whitening matrix is rescaled to ensure that each\n recovered source has unit variance.\n If False, the data is already considered to be whitened, and no\n whitening is performed.\n\n .. deprecated:: 1.1\n From version 1.3, `whiten='unit-variance'` will be used by default.\n `whiten=True` is deprecated from 1.1 and will raise ValueError in 1.3.\n Use `whiten=arbitrary-variance` instead.\n\n fun : {'logcosh', 'exp', 'cube'} or callable, default='logcosh'\n The functional form of the G function used in the\n approximation to neg-entropy. Could be either 'logcosh', 'exp',\n or 'cube'.\n You can also provide your own function. It should return a tuple\n containing the value of the function, and of its derivative, in the\n point. The derivative should be averaged along its last dimension.\n Example:\n\n def my_g(x):\n return x ** 3, np.mean(3 * x ** 2, axis=-1)\n\n fun_args : dict, default=None\n Arguments to send to the functional form.\n If empty or None and if fun='logcosh', fun_args will take value\n {'alpha' : 1.0}.\n\n max_iter : int, default=200\n Maximum number of iterations to perform.\n\n tol : float, default=1e-04\n A positive scalar giving the tolerance at which the\n un-mixing matrix is considered to have converged.\n\n w_init : ndarray of shape (n_components, n_components), default=None\n Initial un-mixing array of dimension (n.comp,n.comp).\n If None (default) then an array of normal r.v.'s is used.\n\n random_state : int, RandomState instance or None, default=None\n Used to initialize ``w_init`` when not specified, with a\n normal distribution. Pass an int, for reproducible results\n across multiple function calls.\n See :term:`Glossary `.\n\n return_X_mean : bool, default=False\n If True, X_mean is returned too.\n\n compute_sources : bool, default=True\n If False, sources are not computed, but only the rotation matrix.\n This can save memory when working with big data. Defaults to True.\n\n return_n_iter : bool, default=False\n Whether or not to return the number of iterations.\n\n Returns\n -------\n K : ndarray of shape (n_components, n_features) or None\n If whiten is 'True', K is the pre-whitening matrix that projects data\n onto the first n_components principal components. If whiten is 'False',\n K is 'None'.\n\n W : ndarray of shape (n_components, n_components)\n The square matrix that unmixes the data after whitening.\n The mixing matrix is the pseudo-inverse of matrix ``W K``\n if K is not None, else it is the inverse of W.\n\n S : ndarray of shape (n_samples, n_components) or None\n Estimated source matrix.\n\n X_mean : ndarray of shape (n_features,)\n The mean over features. 
Returned only if return_X_mean is True.\n\n n_iter : int\n If the algorithm is \"deflation\", n_iter is the\n maximum number of iterations run across all components. Else\n they are just the number of iterations taken to converge. This is\n returned only when return_n_iter is set to `True`.\n\n Notes\n -----\n The data matrix X is considered to be a linear combination of\n non-Gaussian (independent) components i.e. X = AS where columns of S\n contain the independent components and A is a linear mixing\n matrix. In short ICA attempts to `un-mix' the data by estimating an\n un-mixing matrix W where ``S = W K X.``\n While FastICA was proposed to estimate as many sources\n as features, it is possible to estimate less by setting\n n_components < n_features. It this case K is not a square matrix\n and the estimated A is the pseudo-inverse of ``W K``.\n\n This implementation was originally made for data of shape\n [n_features, n_samples]. Now the input is transposed\n before the algorithm is applied. This makes it slightly\n faster for Fortran-ordered input.\n\n References\n ----------\n .. [1] A. Hyvarinen and E. Oja, \"Fast Independent Component Analysis\",\n Algorithms and Applications, Neural Networks, 13(4-5), 2000,\n pp. 411-430.\n " }, { "id": "sklearn/sklearn.decomposition._incremental_pca/IncrementalPCA/__init__", @@ -66506,7 +65958,7 @@ "qname": "sklearn.decomposition._incremental_pca.IncrementalPCA.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -66519,7 +65971,7 @@ "qname": "sklearn.decomposition._incremental_pca.IncrementalPCA.__init__.n_components", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Number of components to keep. If ``n_components`` is ``None``,\nthen ``n_components`` is set to ``min(n_samples, n_features)``." @@ -66535,7 +65987,7 @@ "qname": "sklearn.decomposition._incremental_pca.IncrementalPCA.__init__.whiten", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "When True (False by default) the ``components_`` vectors are divided\nby ``n_samples`` times ``components_`` to ensure uncorrelated outputs\nwith unit component-wise variances.\n\nWhitening will remove some information from the transformed signal\n(the relative variance scales of the components) but can sometimes\nimprove the predictive accuracy of the downstream estimators by\nmaking data respect some hard-wired assumptions." @@ -66551,7 +66003,7 @@ "qname": "sklearn.decomposition._incremental_pca.IncrementalPCA.__init__.copy", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "If False, X will be overwritten. ``copy=False`` can be used to\nsave memory but is unsafe for general use." @@ -66567,7 +66019,7 @@ "qname": "sklearn.decomposition._incremental_pca.IncrementalPCA.__init__.batch_size", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "The number of samples to use for each batch. Only used when calling\n``fit``. 
If ``batch_size`` is ``None``, then ``batch_size``\nis inferred from the data and set to ``5 * n_features``, to provide a\nbalance between approximation accuracy and memory consumption." @@ -66579,7 +66031,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -66595,7 +66047,7 @@ "qname": "sklearn.decomposition._incremental_pca.IncrementalPCA.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -66608,7 +66060,7 @@ "qname": "sklearn.decomposition._incremental_pca.IncrementalPCA.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Training data, where `n_samples` is the number of samples and\n`n_features` is the number of features." @@ -66633,7 +66085,7 @@ "qname": "sklearn.decomposition._incremental_pca.IncrementalPCA.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present for API consistency by convention." @@ -66645,7 +66097,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the model with X, using minibatches of size batch_size.", "docstring": "Fit the model with X, using minibatches of size batch_size.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training data, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n self : object\n Returns the instance itself.\n " }, @@ -66661,7 +66113,7 @@ "qname": "sklearn.decomposition._incremental_pca.IncrementalPCA.partial_fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -66674,7 +66126,7 @@ "qname": "sklearn.decomposition._incremental_pca.IncrementalPCA.partial_fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Training data, where `n_samples` is the number of samples and\n`n_features` is the number of features." @@ -66690,7 +66142,7 @@ "qname": "sklearn.decomposition._incremental_pca.IncrementalPCA.partial_fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present for API consistency by convention." @@ -66706,7 +66158,7 @@ "qname": "sklearn.decomposition._incremental_pca.IncrementalPCA.partial_fit.check_input", "default_value": "True", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Run check_array on X." @@ -66718,7 +66170,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Incremental fit with X. All of X is processed as a single batch.", "docstring": "Incremental fit with X. 
All of X is processed as a single batch.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n check_input : bool, default=True\n Run check_array on X.\n\n Returns\n -------\n self : object\n Returns the instance itself.\n " }, @@ -66734,7 +66186,7 @@ "qname": "sklearn.decomposition._incremental_pca.IncrementalPCA.transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -66747,7 +66199,7 @@ "qname": "sklearn.decomposition._incremental_pca.IncrementalPCA.transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "New data, where `n_samples` is the number of samples\nand `n_features` is the number of features." @@ -66768,7 +66220,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Apply dimensionality reduction to X.\n\nX is projected on the first principal components previously extracted\nfrom a training set, using minibatches of size batch_size if X is\nsparse.", "docstring": "Apply dimensionality reduction to X.\n\n X is projected on the first principal components previously extracted\n from a training set, using minibatches of size batch_size if X is\n sparse.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n New data, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n Returns\n -------\n X_new : ndarray of shape (n_samples, n_components)\n Projection of X in the first principal components.\n\n Examples\n --------\n\n >>> import numpy as np\n >>> from sklearn.decomposition import IncrementalPCA\n >>> X = np.array([[-1, -1], [-2, -1], [-3, -2],\n ... [1, 1], [2, 1], [3, 2]])\n >>> ipca = IncrementalPCA(n_components=2, batch_size=3)\n >>> ipca.fit(X)\n IncrementalPCA(batch_size=3, n_components=2)\n >>> ipca.transform(X) # doctest: +SKIP\n " }, @@ -66784,7 +66236,7 @@ "qname": "sklearn.decomposition._kernel_pca.KernelPCA.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -66797,7 +66249,7 @@ "qname": "sklearn.decomposition._kernel_pca.KernelPCA.__init__.n_components", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Number of components. If None, all non-zero components are kept." @@ -66813,14 +66265,14 @@ "qname": "sklearn.decomposition._kernel_pca.KernelPCA.__init__.kernel", "default_value": "'linear'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'linear', 'poly', 'rbf', 'sigmoid', 'cosine', 'precomputed'}, default='linear'", "description": "Kernel used for PCA." 
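(Illustrative aside, not part of the diff: the IncrementalPCA docstrings above describe batch-wise fitting via fit(), which slices X into minibatches of batch_size, and explicit out-of-core updates via partial_fit(). A minimal sketch of that documented usage follows; the synthetic data, numpy helpers, and parameter values are chosen only for illustration and are not taken from the annotation data.)

    import numpy as np
    from sklearn.decomposition import IncrementalPCA

    X = np.random.RandomState(0).rand(100, 8)     # synthetic data, illustration only

    # fit() processes X internally in minibatches of size batch_size
    ipca = IncrementalPCA(n_components=3, batch_size=25)
    X_reduced = ipca.fit(X).transform(X)

    # partial_fit() treats each call as one batch, e.g. for out-of-core learning
    ipca_stream = IncrementalPCA(n_components=3)
    for chunk in np.array_split(X, 4):
        ipca_stream.partial_fit(chunk)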
}, "type": { "kind": "EnumType", - "values": ["sigmoid", "cosine", "linear", "precomputed", "rbf", "poly"] + "values": ["rbf", "precomputed", "poly", "sigmoid", "linear", "cosine"] } }, { @@ -66829,7 +66281,7 @@ "qname": "sklearn.decomposition._kernel_pca.KernelPCA.__init__.gamma", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=None", "description": "Kernel coefficient for rbf, poly and sigmoid kernels. Ignored by other\nkernels. If ``gamma`` is ``None``, then it is set to ``1/n_features``." @@ -66845,7 +66297,7 @@ "qname": "sklearn.decomposition._kernel_pca.KernelPCA.__init__.degree", "default_value": "3", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=3", "description": "Degree for poly kernels. Ignored by other kernels." @@ -66861,7 +66313,7 @@ "qname": "sklearn.decomposition._kernel_pca.KernelPCA.__init__.coef0", "default_value": "1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1", "description": "Independent term in poly and sigmoid kernels.\nIgnored by other kernels." @@ -66877,7 +66329,7 @@ "qname": "sklearn.decomposition._kernel_pca.KernelPCA.__init__.kernel_params", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "dict, default=None", "description": "Parameters (keyword arguments) and\nvalues for kernel passed as callable object.\nIgnored by other kernels." @@ -66893,7 +66345,7 @@ "qname": "sklearn.decomposition._kernel_pca.KernelPCA.__init__.alpha", "default_value": "1.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1.0", "description": "Hyperparameter of the ridge regression that learns the\ninverse transform (when fit_inverse_transform=True)." @@ -66909,7 +66361,7 @@ "qname": "sklearn.decomposition._kernel_pca.KernelPCA.__init__.fit_inverse_transform", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Learn the inverse transform for non-precomputed kernels\n(i.e. learn to find the pre-image of a point). This method is based\non [2]_." @@ -66925,14 +66377,14 @@ "qname": "sklearn.decomposition._kernel_pca.KernelPCA.__init__.eigen_solver", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'auto', 'dense', 'arpack', 'randomized'}, default='auto'", "description": "Select eigensolver to use. If `n_components` is much\nless than the number of training samples, randomized (or arpack to a\nsmaller extent) may be more efficient than the dense eigensolver.\nRandomized SVD is performed according to the method of Halko et al\n[3]_.\n\nauto :\n the solver is selected by a default policy based on n_samples\n (the number of training samples) and `n_components`:\n if the number of components to extract is less than 10 (strict) and\n the number of samples is more than 200 (strict), the 'arpack'\n method is enabled. 
Otherwise the exact full eigenvalue\n decomposition is computed and optionally truncated afterwards\n ('dense' method).\ndense :\n run exact full eigenvalue decomposition calling the standard\n LAPACK solver via `scipy.linalg.eigh`, and select the components\n by postprocessing\narpack :\n run SVD truncated to n_components calling ARPACK solver using\n `scipy.sparse.linalg.eigsh`. It requires strictly\n 0 < n_components < n_samples\nrandomized :\n run randomized SVD by the method of Halko et al. [3]_. The current\n implementation selects eigenvalues based on their module; therefore\n using this method can lead to unexpected results if the kernel is\n not positive semi-definite. See also [4]_.\n\n.. versionchanged:: 1.0\n `'randomized'` was added." }, "type": { "kind": "EnumType", - "values": ["auto", "arpack", "randomized", "dense"] + "values": ["arpack", "auto", "randomized", "dense"] } }, { @@ -66941,7 +66393,7 @@ "qname": "sklearn.decomposition._kernel_pca.KernelPCA.__init__.tol", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0", "description": "Convergence tolerance for arpack.\nIf 0, optimal value will be chosen by arpack." @@ -66957,7 +66409,7 @@ "qname": "sklearn.decomposition._kernel_pca.KernelPCA.__init__.max_iter", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Maximum number of iterations for arpack.\nIf None, optimal value will be chosen by arpack." @@ -66973,7 +66425,7 @@ "qname": "sklearn.decomposition._kernel_pca.KernelPCA.__init__.iterated_power", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int >= 0, or 'auto', default='auto'", "description": "Number of iterations for the power method computed by\nsvd_solver == 'randomized'. When 'auto', it is set to 7 when\n`n_components < 0.1 * min(X.shape)`, other it is set to 4.\n\n.. versionadded:: 1.0" @@ -66998,7 +66450,7 @@ "qname": "sklearn.decomposition._kernel_pca.KernelPCA.__init__.remove_zero_eig", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "If True, then all components with zero eigenvalues are removed, so\nthat the number of components in the output may be < n_components\n(and sometimes even zero due to numerical instability).\nWhen n_components is None, this parameter is ignored and components\nwith zero eigenvalues are removed regardless." @@ -67014,7 +66466,7 @@ "qname": "sklearn.decomposition._kernel_pca.KernelPCA.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Used when ``eigen_solver`` == 'arpack' or 'randomized'. Pass an int\nfor reproducible results across multiple function calls.\nSee :term:`Glossary `.\n\n.. versionadded:: 0.18" @@ -67043,7 +66495,7 @@ "qname": "sklearn.decomposition._kernel_pca.KernelPCA.__init__.copy_X", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "If True, input X is copied and stored by the model in the `X_fit_`\nattribute. If no further changes will be done to X, setting\n`copy_X=False` saves memory by storing a reference.\n\n.. 
versionadded:: 0.18" @@ -67059,7 +66511,7 @@ "qname": "sklearn.decomposition._kernel_pca.KernelPCA.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "The number of parallel jobs to run.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details.\n\n.. versionadded:: 0.18" @@ -67071,7 +66523,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -67280,7 +66732,7 @@ "qname": "sklearn.decomposition._kernel_pca.KernelPCA.alphas_.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -67289,7 +66741,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -67305,7 +66757,7 @@ "qname": "sklearn.decomposition._kernel_pca.KernelPCA.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -67318,7 +66770,7 @@ "qname": "sklearn.decomposition._kernel_pca.KernelPCA.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Training vector, where `n_samples` is the number of samples\nand `n_features` is the number of features." @@ -67343,7 +66795,7 @@ "qname": "sklearn.decomposition._kernel_pca.KernelPCA.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present for API consistency by convention." @@ -67355,7 +66807,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the model from data in X.", "docstring": "Fit the model from data in X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training vector, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n self : object\n Returns the instance itself.\n " }, @@ -67371,7 +66823,7 @@ "qname": "sklearn.decomposition._kernel_pca.KernelPCA.fit_transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -67384,7 +66836,7 @@ "qname": "sklearn.decomposition._kernel_pca.KernelPCA.fit_transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Training vector, where `n_samples` is the number of samples\nand `n_features` is the number of features." @@ -67409,7 +66861,7 @@ "qname": "sklearn.decomposition._kernel_pca.KernelPCA.fit_transform.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present for API consistency by convention." 
@@ -67421,7 +66873,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the model from data in X and transform X.", "docstring": "Fit the model from data in X and transform X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training vector, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n **params : kwargs\n Parameters (keyword arguments) and values passed to\n the fit_transform instance.\n\n Returns\n -------\n X_new : ndarray of shape (n_samples, n_components)\n Returns the instance itself.\n " }, @@ -67437,7 +66889,7 @@ "qname": "sklearn.decomposition._kernel_pca.KernelPCA.inverse_transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -67450,7 +66902,7 @@ "qname": "sklearn.decomposition._kernel_pca.KernelPCA.inverse_transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_components)", "description": "Training vector, where `n_samples` is the number of samples\nand `n_features` is the number of features." @@ -67471,7 +66923,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Transform X back to original space.\n\n``inverse_transform`` approximates the inverse transformation using\na learned pre-image. The pre-image is learned by kernel ridge\nregression of the original data on their low-dimensional representation\nvectors.\n\n.. note:\n :meth:`~sklearn.decomposition.fit` internally uses a centered\n kernel. As the centered kernel no longer contains the information\n of the mean of kernel features, such information is not taken into\n account in reconstruction.\n\n.. note::\n When users want to compute inverse transformation for 'linear'\n kernel, it is recommended that they use\n :class:`~sklearn.decomposition.PCA` instead. Unlike\n :class:`~sklearn.decomposition.PCA`,\n :class:`~sklearn.decomposition.KernelPCA`'s ``inverse_transform``\n does not reconstruct the mean of data when 'linear' kernel is used\n due to the use of centered kernel.", "docstring": "Transform X back to original space.\n\n ``inverse_transform`` approximates the inverse transformation using\n a learned pre-image. The pre-image is learned by kernel ridge\n regression of the original data on their low-dimensional representation\n vectors.\n\n .. note:\n :meth:`~sklearn.decomposition.fit` internally uses a centered\n kernel. As the centered kernel no longer contains the information\n of the mean of kernel features, such information is not taken into\n account in reconstruction.\n\n .. note::\n When users want to compute inverse transformation for 'linear'\n kernel, it is recommended that they use\n :class:`~sklearn.decomposition.PCA` instead. 
Unlike\n :class:`~sklearn.decomposition.PCA`,\n :class:`~sklearn.decomposition.KernelPCA`'s ``inverse_transform``\n does not reconstruct the mean of data when 'linear' kernel is used\n due to the use of centered kernel.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_components)\n Training vector, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n Returns\n -------\n X_new : ndarray of shape (n_samples, n_features)\n Returns the instance itself.\n\n References\n ----------\n `Bak\u0131r, G\u00f6khan H., Jason Weston, and Bernhard Sch\u00f6lkopf.\n \"Learning to find pre-images.\"\n Advances in neural information processing systems 16 (2004): 449-456.\n `_\n " }, @@ -67490,7 +66942,7 @@ "qname": "sklearn.decomposition._kernel_pca.KernelPCA.lambdas_.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -67499,7 +66951,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -67515,7 +66967,7 @@ "qname": "sklearn.decomposition._kernel_pca.KernelPCA.transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -67528,7 +66980,7 @@ "qname": "sklearn.decomposition._kernel_pca.KernelPCA.transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Training vector, where `n_samples` is the number of samples\nand `n_features` is the number of features." @@ -67549,7 +67001,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Transform X.", "docstring": "Transform X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training vector, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n Returns\n -------\n X_new : ndarray of shape (n_samples, n_components)\n Returns the instance itself.\n " }, @@ -67565,7 +67017,7 @@ "qname": "sklearn.decomposition._lda.LatentDirichletAllocation.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -67578,7 +67030,7 @@ "qname": "sklearn.decomposition._lda.LatentDirichletAllocation.__init__.n_components", "default_value": "10", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=10", "description": "Number of topics.\n\n.. versionchanged:: 0.19\n ``n_topics`` was renamed to ``n_components``" @@ -67594,7 +67046,7 @@ "qname": "sklearn.decomposition._lda.LatentDirichletAllocation.__init__.doc_topic_prior", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=None", "description": "Prior of document topic distribution `theta`. If the value is None,\ndefaults to `1 / n_components`.\nIn [1]_, this is called `alpha`." @@ -67610,7 +67062,7 @@ "qname": "sklearn.decomposition._lda.LatentDirichletAllocation.__init__.topic_word_prior", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=None", "description": "Prior of topic word distribution `beta`. 
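(Illustrative aside, not part of the diff: the KernelPCA entries above document fit/fit_transform/transform and an approximate inverse_transform learned by kernel ridge regression when fit_inverse_transform=True. A short sketch of that documented behaviour follows; make_circles and the specific kernel/gamma/alpha values are illustrative assumptions, not values from the annotation data.)

    from sklearn.datasets import make_circles
    from sklearn.decomposition import KernelPCA

    X, _ = make_circles(n_samples=200, factor=0.3, noise=0.05, random_state=0)

    # fit_inverse_transform=True learns the pre-image map by kernel ridge regression,
    # which makes inverse_transform() available afterwards
    kpca = KernelPCA(n_components=2, kernel="rbf", gamma=10.0,
                     fit_inverse_transform=True, alpha=0.1)
    X_kpca = kpca.fit_transform(X)
    X_back = kpca.inverse_transform(X_kpca)   # approximate reconstruction in input space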
If the value is None, defaults\nto `1 / n_components`.\nIn [1]_, this is called `eta`." @@ -67626,14 +67078,14 @@ "qname": "sklearn.decomposition._lda.LatentDirichletAllocation.__init__.learning_method", "default_value": "'batch'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'batch', 'online'}, default='batch'", "description": "Method used to update `_component`. Only used in :meth:`fit` method.\nIn general, if the data size is large, the online update will be much\nfaster than the batch update.\n\nValid options::\n\n 'batch': Batch variational Bayes method. Use all training data in\n each EM update.\n Old `components_` will be overwritten in each iteration.\n 'online': Online variational Bayes method. In each EM update, use\n mini-batch of training data to update the ``components_``\n variable incrementally. The learning rate is controlled by the\n ``learning_decay`` and the ``learning_offset`` parameters.\n\n.. versionchanged:: 0.20\n The default learning method is now ``\"batch\"``." }, "type": { "kind": "EnumType", - "values": ["batch", "online"] + "values": ["online", "batch"] } }, { @@ -67642,7 +67094,7 @@ "qname": "sklearn.decomposition._lda.LatentDirichletAllocation.__init__.learning_decay", "default_value": "0.7", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.7", "description": "It is a parameter that control learning rate in the online learning\nmethod. The value should be set between (0.5, 1.0] to guarantee\nasymptotic convergence. When the value is 0.0 and batch_size is\n``n_samples``, the update method is same as batch learning. In the\nliterature, this is called kappa." @@ -67658,7 +67110,7 @@ "qname": "sklearn.decomposition._lda.LatentDirichletAllocation.__init__.learning_offset", "default_value": "10.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=10.0", "description": "A (positive) parameter that downweights early iterations in online\nlearning. It should be greater than 1.0. In the literature, this is\ncalled tau_0." @@ -67674,7 +67126,7 @@ "qname": "sklearn.decomposition._lda.LatentDirichletAllocation.__init__.max_iter", "default_value": "10", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=10", "description": "The maximum number of passes over the training data (aka epochs).\nIt only impacts the behavior in the :meth:`fit` method, and not the\n:meth:`partial_fit` method." @@ -67690,7 +67142,7 @@ "qname": "sklearn.decomposition._lda.LatentDirichletAllocation.__init__.batch_size", "default_value": "128", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=128", "description": "Number of documents to use in each EM iteration. Only used in online\nlearning." @@ -67706,7 +67158,7 @@ "qname": "sklearn.decomposition._lda.LatentDirichletAllocation.__init__.evaluate_every", "default_value": "-1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=-1", "description": "How often to evaluate perplexity. Only used in `fit` method.\nset it to 0 or negative number to not evaluate perplexity in\ntraining at all. Evaluating perplexity can help you check convergence\nin training process, but it will also increase total training time.\nEvaluating perplexity in every iteration might increase training time\nup to two-fold." 
@@ -67722,7 +67174,7 @@ "qname": "sklearn.decomposition._lda.LatentDirichletAllocation.__init__.total_samples", "default_value": "1000000.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=1e6", "description": "Total number of documents. Only used in the :meth:`partial_fit` method." @@ -67738,7 +67190,7 @@ "qname": "sklearn.decomposition._lda.LatentDirichletAllocation.__init__.perp_tol", "default_value": "0.1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-1", "description": "Perplexity tolerance in batch learning. Only used when\n``evaluate_every`` is greater than 0." @@ -67754,7 +67206,7 @@ "qname": "sklearn.decomposition._lda.LatentDirichletAllocation.__init__.mean_change_tol", "default_value": "0.001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-3", "description": "Stopping tolerance for updating document topic distribution in E-step." @@ -67770,7 +67222,7 @@ "qname": "sklearn.decomposition._lda.LatentDirichletAllocation.__init__.max_doc_update_iter", "default_value": "100", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=100", "description": "Max number of iterations for updating document topic distribution in\nthe E-step." @@ -67786,7 +67238,7 @@ "qname": "sklearn.decomposition._lda.LatentDirichletAllocation.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "The number of jobs to use in the E-step.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details." @@ -67802,7 +67254,7 @@ "qname": "sklearn.decomposition._lda.LatentDirichletAllocation.__init__.verbose", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=0", "description": "Verbosity level." @@ -67818,7 +67270,7 @@ "qname": "sklearn.decomposition._lda.LatentDirichletAllocation.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Pass an int for reproducible results across multiple function calls.\nSee :term:`Glossary `." @@ -67843,7 +67295,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -68455,7 +67907,7 @@ "qname": "sklearn.decomposition._lda.LatentDirichletAllocation.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -68468,7 +67920,7 @@ "qname": "sklearn.decomposition._lda.LatentDirichletAllocation.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Document word matrix." @@ -68493,7 +67945,7 @@ "qname": "sklearn.decomposition._lda.LatentDirichletAllocation.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present here for API consistency by convention." 
@@ -68505,7 +67957,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Learn model for the data X with variational Bayes method.\n\nWhen `learning_method` is 'online', use mini-batch update.\nOtherwise, use batch update.", "docstring": "Learn model for the data X with variational Bayes method.\n\n When `learning_method` is 'online', use mini-batch update.\n Otherwise, use batch update.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Document word matrix.\n\n y : Ignored\n Not used, present here for API consistency by convention.\n\n Returns\n -------\n self\n Fitted estimator.\n " }, @@ -68521,7 +67973,7 @@ "qname": "sklearn.decomposition._lda.LatentDirichletAllocation.partial_fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -68534,7 +67986,7 @@ "qname": "sklearn.decomposition._lda.LatentDirichletAllocation.partial_fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Document word matrix." @@ -68559,7 +68011,7 @@ "qname": "sklearn.decomposition._lda.LatentDirichletAllocation.partial_fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present here for API consistency by convention." @@ -68571,7 +68023,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Online VB with Mini-Batch update.", "docstring": "Online VB with Mini-Batch update.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Document word matrix.\n\n y : Ignored\n Not used, present here for API consistency by convention.\n\n Returns\n -------\n self\n Partially fitted estimator.\n " }, @@ -68587,7 +68039,7 @@ "qname": "sklearn.decomposition._lda.LatentDirichletAllocation.perplexity.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -68600,7 +68052,7 @@ "qname": "sklearn.decomposition._lda.LatentDirichletAllocation.perplexity.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Document word matrix." @@ -68625,7 +68077,7 @@ "qname": "sklearn.decomposition._lda.LatentDirichletAllocation.perplexity.sub_sampling", "default_value": "False", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "bool", "description": "Do sub-sampling or not." @@ -68637,7 +68089,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Calculate approximate perplexity for data X.\n\nPerplexity is defined as exp(-1. * log-likelihood per word)\n\n.. versionchanged:: 0.19\n *doc_topic_distr* argument has been deprecated and is ignored\n because user no longer has access to unnormalized distribution", "docstring": "Calculate approximate perplexity for data X.\n\n Perplexity is defined as exp(-1. * log-likelihood per word)\n\n .. 
versionchanged:: 0.19\n *doc_topic_distr* argument has been deprecated and is ignored\n because user no longer has access to unnormalized distribution\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Document word matrix.\n\n sub_sampling : bool\n Do sub-sampling or not.\n\n Returns\n -------\n score : float\n Perplexity score.\n " }, @@ -68653,7 +68105,7 @@ "qname": "sklearn.decomposition._lda.LatentDirichletAllocation.score.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -68666,7 +68118,7 @@ "qname": "sklearn.decomposition._lda.LatentDirichletAllocation.score.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Document word matrix." @@ -68691,7 +68143,7 @@ "qname": "sklearn.decomposition._lda.LatentDirichletAllocation.score.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present here for API consistency by convention." @@ -68703,7 +68155,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Calculate approximate log-likelihood as score.", "docstring": "Calculate approximate log-likelihood as score.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Document word matrix.\n\n y : Ignored\n Not used, present here for API consistency by convention.\n\n Returns\n -------\n score : float\n Use approximate bound as score.\n " }, @@ -68719,7 +68171,7 @@ "qname": "sklearn.decomposition._lda.LatentDirichletAllocation.transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -68732,7 +68184,7 @@ "qname": "sklearn.decomposition._lda.LatentDirichletAllocation.transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Document word matrix." @@ -68753,7 +68205,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Transform data X according to the fitted model.\n\n .. versionchanged:: 0.18\n *doc_topic_distr* is now normalized", "docstring": "Transform data X according to the fitted model.\n\n .. versionchanged:: 0.18\n *doc_topic_distr* is now normalized\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Document word matrix.\n\n Returns\n -------\n doc_topic_distr : ndarray of shape (n_samples, n_components)\n Document topic distribution for X.\n " }, @@ -68911,7 +68363,7 @@ "qname": "sklearn.decomposition._nmf.MiniBatchNMF.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -68924,7 +68376,7 @@ "qname": "sklearn.decomposition._nmf.MiniBatchNMF.__init__.n_components", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Number of components, if `n_components` is not set all features\nare kept." 
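(Illustrative aside, not part of the diff: the LatentDirichletAllocation entries above document batch vs. online variational Bayes in fit(), mini-batch updates in partial_fit(), and the score()/perplexity()/transform() evaluation methods. A minimal sketch of that documented usage follows; the toy documents and CountVectorizer preprocessing are illustrative assumptions, not part of the annotation data.)

    from sklearn.feature_extraction.text import CountVectorizer
    from sklearn.decomposition import LatentDirichletAllocation

    docs = ["apples and oranges", "cats chase mice",
            "oranges are sweet fruit", "mice run from cats"]
    X = CountVectorizer().fit_transform(docs)        # document-word matrix

    lda = LatentDirichletAllocation(n_components=2, learning_method="online",
                                    batch_size=2, random_state=0)
    lda.fit(X)                                       # online mini-batch EM updates
    doc_topic = lda.transform(X)                     # normalized doc-topic distributions
    print(lda.score(X), lda.perplexity(X))           # approx. log-likelihood and perplexity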
@@ -68940,14 +68392,14 @@ "qname": "sklearn.decomposition._nmf.MiniBatchNMF.__init__.init", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'random', 'nndsvd', 'nndsvda', 'nndsvdar', 'custom'}, default=None", "description": "Method used to initialize the procedure.\nValid options:\n\n- `None`: 'nndsvda' if `n_components <= min(n_samples, n_features)`,\n otherwise random.\n\n- `'random'`: non-negative random matrices, scaled with:\n `sqrt(X.mean() / n_components)`\n\n- `'nndsvd'`: Nonnegative Double Singular Value Decomposition (NNDSVD)\n initialization (better for sparseness).\n\n- `'nndsvda'`: NNDSVD with zeros filled with the average of X\n (better when sparsity is not desired).\n\n- `'nndsvdar'` NNDSVD with zeros filled with small random values\n (generally faster, less accurate alternative to NNDSVDa\n for when sparsity is not desired).\n\n- `'custom'`: use custom matrices `W` and `H`" }, "type": { "kind": "EnumType", - "values": ["nndsvda", "nndsvd", "custom", "nndsvdar", "random"] + "values": ["random", "custom", "nndsvda", "nndsvd", "nndsvdar"] } }, { @@ -68956,7 +68408,7 @@ "qname": "sklearn.decomposition._nmf.MiniBatchNMF.__init__.batch_size", "default_value": "1024", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=1024", "description": "Number of samples in each mini-batch. Large batch sizes\ngive better long-term convergence at the cost of a slower start." @@ -68972,7 +68424,7 @@ "qname": "sklearn.decomposition._nmf.MiniBatchNMF.__init__.beta_loss", "default_value": "'frobenius'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float or {'frobenius', 'kullback-leibler', 'itakura-saito'}, default='frobenius'", "description": "Beta divergence to be minimized, measuring the distance between `X`\nand the dot product `WH`. Note that values different from 'frobenius'\n(or 2) and 'kullback-leibler' (or 1) lead to significantly slower\nfits. Note that for `beta_loss <= 0` (or 'itakura-saito'), the input\nmatrix `X` cannot contain zeros." @@ -68982,7 +68434,7 @@ "types": [ { "kind": "EnumType", - "values": ["frobenius", "itakura-saito", "kullback-leibler"] + "values": ["itakura-saito", "kullback-leibler", "frobenius"] }, { "kind": "NamedType", @@ -68997,7 +68449,7 @@ "qname": "sklearn.decomposition._nmf.MiniBatchNMF.__init__.tol", "default_value": "0.0001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-4", "description": "Control early stopping based on the norm of the differences in `H`\nbetween 2 steps. To disable early stopping based on changes in `H`, set\n`tol` to 0.0." @@ -69013,7 +68465,7 @@ "qname": "sklearn.decomposition._nmf.MiniBatchNMF.__init__.max_no_improvement", "default_value": "10", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=10", "description": "Control early stopping based on the consecutive number of mini batches\nthat does not yield an improvement on the smoothed cost function.\nTo disable convergence detection based on cost function, set\n`max_no_improvement` to None." 
@@ -69029,7 +68481,7 @@ "qname": "sklearn.decomposition._nmf.MiniBatchNMF.__init__.max_iter", "default_value": "200", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=200", "description": "Maximum number of iterations over the complete dataset before\ntiming out." @@ -69045,7 +68497,7 @@ "qname": "sklearn.decomposition._nmf.MiniBatchNMF.__init__.alpha_W", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.0", "description": "Constant that multiplies the regularization terms of `W`. Set it to zero\n(default) to have no regularization on `W`." @@ -69061,7 +68513,7 @@ "qname": "sklearn.decomposition._nmf.MiniBatchNMF.__init__.alpha_H", "default_value": "'same'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float or \"same\", default=\"same\"", "description": "Constant that multiplies the regularization terms of `H`. Set it to zero to\nhave no regularization on `H`. If \"same\" (default), it takes the same value as\n`alpha_W`." @@ -69086,7 +68538,7 @@ "qname": "sklearn.decomposition._nmf.MiniBatchNMF.__init__.l1_ratio", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.0", "description": "The regularization mixing parameter, with 0 <= l1_ratio <= 1.\nFor l1_ratio = 0 the penalty is an elementwise L2 penalty\n(aka Frobenius Norm).\nFor l1_ratio = 1 it is an elementwise L1 penalty.\nFor 0 < l1_ratio < 1, the penalty is a combination of L1 and L2." @@ -69102,7 +68554,7 @@ "qname": "sklearn.decomposition._nmf.MiniBatchNMF.__init__.forget_factor", "default_value": "0.7", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.7", "description": "Amount of rescaling of past information. Its value could be 1 with\nfinite datasets. Choosing values < 1 is recommended with online\nlearning as more recent batches will weight more than past batches." @@ -69118,7 +68570,7 @@ "qname": "sklearn.decomposition._nmf.MiniBatchNMF.__init__.fresh_restarts", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Whether to completely solve for W at each step. Doing fresh restarts will likely\nlead to a better solution for a same number of iterations but it is much slower." @@ -69134,7 +68586,7 @@ "qname": "sklearn.decomposition._nmf.MiniBatchNMF.__init__.fresh_restarts_max_iter", "default_value": "30", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=30", "description": "Maximum number of iterations when solving for W at each step. Only used when\ndoing fresh restarts. These iterations may be stopped early based on a small\nchange of W controlled by `tol`." @@ -69150,7 +68602,7 @@ "qname": "sklearn.decomposition._nmf.MiniBatchNMF.__init__.transform_max_iter", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Maximum number of iterations when solving for W at transform time.\nIf None, it defaults to `max_iter`." 
@@ -69166,7 +68618,7 @@ "qname": "sklearn.decomposition._nmf.MiniBatchNMF.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Used for initialisation (when ``init`` == 'nndsvdar' or\n'random'), and in Coordinate Descent. Pass an int for reproducible\nresults across multiple function calls.\nSee :term:`Glossary `." @@ -69195,7 +68647,7 @@ "qname": "sklearn.decomposition._nmf.MiniBatchNMF.__init__.verbose", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Whether to be verbose." @@ -69207,7 +68659,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -69616,7 +69068,7 @@ "qname": "sklearn.decomposition._nmf.MiniBatchNMF.fit_transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -69629,7 +69081,7 @@ "qname": "sklearn.decomposition._nmf.MiniBatchNMF.fit_transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Data matrix to be decomposed." @@ -69654,7 +69106,7 @@ "qname": "sklearn.decomposition._nmf.MiniBatchNMF.fit_transform.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present here for API consistency by convention." @@ -69670,7 +69122,7 @@ "qname": "sklearn.decomposition._nmf.MiniBatchNMF.fit_transform.W", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_components), default=None", "description": "If `init='custom'`, it is used as initial guess for the solution." @@ -69686,7 +69138,7 @@ "qname": "sklearn.decomposition._nmf.MiniBatchNMF.fit_transform.H", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_components, n_features), default=None", "description": "If `init='custom'`, it is used as initial guess for the solution." 
@@ -69698,7 +69150,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Learn a NMF model for the data X and returns the transformed data.\n\nThis is more efficient than calling fit followed by transform.", "docstring": "Learn a NMF model for the data X and returns the transformed data.\n\n This is more efficient than calling fit followed by transform.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Data matrix to be decomposed.\n\n y : Ignored\n Not used, present here for API consistency by convention.\n\n W : array-like of shape (n_samples, n_components), default=None\n If `init='custom'`, it is used as initial guess for the solution.\n\n H : array-like of shape (n_components, n_features), default=None\n If `init='custom'`, it is used as initial guess for the solution.\n\n Returns\n -------\n W : ndarray of shape (n_samples, n_components)\n Transformed data.\n " }, @@ -69714,7 +69166,7 @@ "qname": "sklearn.decomposition._nmf.MiniBatchNMF.partial_fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -69727,7 +69179,7 @@ "qname": "sklearn.decomposition._nmf.MiniBatchNMF.partial_fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Data matrix to be decomposed." @@ -69752,7 +69204,7 @@ "qname": "sklearn.decomposition._nmf.MiniBatchNMF.partial_fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present here for API consistency by convention." @@ -69768,7 +69220,7 @@ "qname": "sklearn.decomposition._nmf.MiniBatchNMF.partial_fit.W", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_components), default=None", "description": "If `init='custom'`, it is used as initial guess for the solution.\nOnly used for the first call to `partial_fit`." @@ -69784,7 +69236,7 @@ "qname": "sklearn.decomposition._nmf.MiniBatchNMF.partial_fit.H", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_components, n_features), default=None", "description": "If `init='custom'`, it is used as initial guess for the solution.\nOnly used for the first call to `partial_fit`." 
@@ -69796,7 +69248,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Update the model using the data in `X` as a mini-batch.\n\nThis method is expected to be called several times consecutively\non different chunks of a dataset so as to implement out-of-core\nor online learning.\n\nThis is especially useful when the whole dataset is too big to fit in\nmemory at once (see :ref:`scaling_strategies`).", "docstring": "Update the model using the data in `X` as a mini-batch.\n\n This method is expected to be called several times consecutively\n on different chunks of a dataset so as to implement out-of-core\n or online learning.\n\n This is especially useful when the whole dataset is too big to fit in\n memory at once (see :ref:`scaling_strategies`).\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Data matrix to be decomposed.\n\n y : Ignored\n Not used, present here for API consistency by convention.\n\n W : array-like of shape (n_samples, n_components), default=None\n If `init='custom'`, it is used as initial guess for the solution.\n Only used for the first call to `partial_fit`.\n\n H : array-like of shape (n_components, n_features), default=None\n If `init='custom'`, it is used as initial guess for the solution.\n Only used for the first call to `partial_fit`.\n\n Returns\n -------\n self\n Returns the instance itself.\n " }, @@ -69812,7 +69264,7 @@ "qname": "sklearn.decomposition._nmf.MiniBatchNMF.transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -69825,7 +69277,7 @@ "qname": "sklearn.decomposition._nmf.MiniBatchNMF.transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Data matrix to be transformed by the model." @@ -69846,7 +69298,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Transform the data X according to the fitted MiniBatchNMF model.", "docstring": "Transform the data X according to the fitted MiniBatchNMF model.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Data matrix to be transformed by the model.\n\n Returns\n -------\n W : ndarray of shape (n_samples, n_components)\n Transformed data.\n " }, @@ -69862,7 +69314,7 @@ "qname": "sklearn.decomposition._nmf.NMF.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -69875,7 +69327,7 @@ "qname": "sklearn.decomposition._nmf.NMF.__init__.n_components", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Number of components, if n_components is not set all features\nare kept." 
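(Illustrative aside, not part of the diff: the MiniBatchNMF entries above (scikit-learn 1.1+) document fit_transform(), chunk-wise partial_fit() for out-of-core or online use, and transform(); the NMF entries that follow describe the batch counterpart. A short sketch of that documented usage follows; the synthetic non-negative matrix and parameter values are illustrative assumptions only.)

    import numpy as np
    from sklearn.decomposition import NMF, MiniBatchNMF

    X = np.abs(np.random.RandomState(0).rand(60, 12))   # NMF-style models need non-negative X

    # Batch NMF: fit_transform is more efficient than fit followed by transform
    W = NMF(n_components=4, init="nndsvda", max_iter=400,
            random_state=0).fit_transform(X)

    # MiniBatchNMF: each partial_fit call consumes one chunk (out-of-core / online use)
    mb = MiniBatchNMF(n_components=4, batch_size=16, random_state=0)
    for chunk in np.array_split(X, 3):
        mb.partial_fit(chunk)
    W_online = mb.transform(X)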
@@ -69891,14 +69343,14 @@ "qname": "sklearn.decomposition._nmf.NMF.__init__.init", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'random', 'nndsvd', 'nndsvda', 'nndsvdar', 'custom'}, default=None", "description": "Method used to initialize the procedure.\nDefault: None.\nValid options:\n\n- `None`: 'nndsvda' if n_components <= min(n_samples, n_features),\n otherwise random.\n\n- `'random'`: non-negative random matrices, scaled with:\n sqrt(X.mean() / n_components)\n\n- `'nndsvd'`: Nonnegative Double Singular Value Decomposition (NNDSVD)\n initialization (better for sparseness)\n\n- `'nndsvda'`: NNDSVD with zeros filled with the average of X\n (better when sparsity is not desired)\n\n- `'nndsvdar'` NNDSVD with zeros filled with small random values\n (generally faster, less accurate alternative to NNDSVDa\n for when sparsity is not desired)\n\n- `'custom'`: use custom matrices W and H\n\n.. versionchanged:: 1.1\n When `init=None` and n_components is less than n_samples and n_features\n defaults to `nndsvda` instead of `nndsvd`." }, "type": { "kind": "EnumType", - "values": ["nndsvda", "nndsvd", "custom", "nndsvdar", "random"] + "values": ["random", "custom", "nndsvda", "nndsvd", "nndsvdar"] } }, { @@ -69907,7 +69359,7 @@ "qname": "sklearn.decomposition._nmf.NMF.__init__.solver", "default_value": "'cd'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'cd', 'mu'}, default='cd'", "description": "Numerical solver to use:\n'cd' is a Coordinate Descent solver.\n'mu' is a Multiplicative Update solver.\n\n.. versionadded:: 0.17\n Coordinate Descent solver.\n\n.. versionadded:: 0.19\n Multiplicative Update solver." @@ -69923,7 +69375,7 @@ "qname": "sklearn.decomposition._nmf.NMF.__init__.beta_loss", "default_value": "'frobenius'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float or {'frobenius', 'kullback-leibler', 'itakura-saito'}, default='frobenius'", "description": "Beta divergence to be minimized, measuring the distance between X\nand the dot product WH. Note that values different from 'frobenius'\n(or 2) and 'kullback-leibler' (or 1) lead to significantly slower\nfits. Note that for beta_loss <= 0 (or 'itakura-saito'), the input\nmatrix X cannot contain zeros. Used only in 'mu' solver.\n\n.. versionadded:: 0.19" @@ -69933,7 +69385,7 @@ "types": [ { "kind": "EnumType", - "values": ["frobenius", "itakura-saito", "kullback-leibler"] + "values": ["itakura-saito", "kullback-leibler", "frobenius"] }, { "kind": "NamedType", @@ -69948,7 +69400,7 @@ "qname": "sklearn.decomposition._nmf.NMF.__init__.tol", "default_value": "0.0001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-4", "description": "Tolerance of the stopping condition." @@ -69964,7 +69416,7 @@ "qname": "sklearn.decomposition._nmf.NMF.__init__.max_iter", "default_value": "200", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=200", "description": "Maximum number of iterations before timing out." 
@@ -69980,7 +69432,7 @@ "qname": "sklearn.decomposition._nmf.NMF.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Used for initialisation (when ``init`` == 'nndsvdar' or\n'random'), and in Coordinate Descent. Pass an int for reproducible\nresults across multiple function calls.\nSee :term:`Glossary `." @@ -70009,7 +69461,7 @@ "qname": "sklearn.decomposition._nmf.NMF.__init__.alpha", "default_value": "'deprecated'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.0", "description": "Constant that multiplies the regularization terms. Set it to zero to\nhave no regularization. When using `alpha` instead of `alpha_W` and `alpha_H`,\nthe regularization terms are not scaled by the `n_features` (resp. `n_samples`)\nfactors for `W` (resp. `H`).\n\n.. versionadded:: 0.17\n *alpha* used in the Coordinate Descent solver.\n\n.. deprecated:: 1.0\n The `alpha` parameter is deprecated in 1.0 and will be removed in 1.2.\n Use `alpha_W` and `alpha_H` instead." @@ -70025,7 +69477,7 @@ "qname": "sklearn.decomposition._nmf.NMF.__init__.alpha_W", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.0", "description": "Constant that multiplies the regularization terms of `W`. Set it to zero\n(default) to have no regularization on `W`.\n\n.. versionadded:: 1.0" @@ -70041,7 +69493,7 @@ "qname": "sklearn.decomposition._nmf.NMF.__init__.alpha_H", "default_value": "'same'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float or \"same\", default=\"same\"", "description": "Constant that multiplies the regularization terms of `H`. Set it to zero to\nhave no regularization on `H`. If \"same\" (default), it takes the same value as\n`alpha_W`.\n\n.. versionadded:: 1.0" @@ -70066,7 +69518,7 @@ "qname": "sklearn.decomposition._nmf.NMF.__init__.l1_ratio", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.0", "description": "The regularization mixing parameter, with 0 <= l1_ratio <= 1.\nFor l1_ratio = 0 the penalty is an elementwise L2 penalty\n(aka Frobenius Norm).\nFor l1_ratio = 1 it is an elementwise L1 penalty.\nFor 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.\n\n.. versionadded:: 0.17\n Regularization parameter *l1_ratio* used in the Coordinate Descent\n solver." @@ -70082,7 +69534,7 @@ "qname": "sklearn.decomposition._nmf.NMF.__init__.verbose", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=0", "description": "Whether to be verbose." @@ -70098,7 +69550,7 @@ "qname": "sklearn.decomposition._nmf.NMF.__init__.shuffle", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "If true, randomize the order of coordinates in the CD solver.\n\n.. versionadded:: 0.17\n *shuffle* parameter used in the Coordinate Descent solver." 
@@ -70114,7 +69566,7 @@ "qname": "sklearn.decomposition._nmf.NMF.__init__.regularization", "default_value": "'deprecated'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'both', 'components', 'transformation', None}, default='both'", "description": "Select whether the regularization affects the components (H), the\ntransformation (W), both or none of them.\n\n.. versionadded:: 0.24\n\n.. deprecated:: 1.0\n The `regularization` parameter is deprecated in 1.0 and will be removed in\n 1.2. Use `alpha_W` and `alpha_H` instead." @@ -70126,7 +69578,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -70459,7 +69911,7 @@ "qname": "sklearn.decomposition._nmf.NMF.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -70472,7 +69924,7 @@ "qname": "sklearn.decomposition._nmf.NMF.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Training vector, where `n_samples` is the number of samples\nand `n_features` is the number of features." @@ -70497,7 +69949,7 @@ "qname": "sklearn.decomposition._nmf.NMF.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present for API consistency by convention." @@ -70509,7 +69961,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Learn a NMF model for the data X.", "docstring": "Learn a NMF model for the data X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training vector, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n **params : kwargs\n Parameters (keyword arguments) and values passed to\n the fit_transform instance.\n\n Returns\n -------\n self : object\n Returns the instance itself.\n " }, @@ -70525,7 +69977,7 @@ "qname": "sklearn.decomposition._nmf.NMF.fit_transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -70538,7 +69990,7 @@ "qname": "sklearn.decomposition._nmf.NMF.fit_transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Training vector, where `n_samples` is the number of samples\nand `n_features` is the number of features." @@ -70563,7 +70015,7 @@ "qname": "sklearn.decomposition._nmf.NMF.fit_transform.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present for API consistency by convention." @@ -70579,7 +70031,7 @@ "qname": "sklearn.decomposition._nmf.NMF.fit_transform.W", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_components)", "description": "If init='custom', it is used as initial guess for the solution." 
@@ -70595,7 +70047,7 @@ "qname": "sklearn.decomposition._nmf.NMF.fit_transform.H", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_components, n_features)", "description": "If init='custom', it is used as initial guess for the solution." @@ -70607,7 +70059,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Learn a NMF model for the data X and returns the transformed data.\n\nThis is more efficient than calling fit followed by transform.", "docstring": "Learn a NMF model for the data X and returns the transformed data.\n\n This is more efficient than calling fit followed by transform.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training vector, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n W : array-like of shape (n_samples, n_components)\n If init='custom', it is used as initial guess for the solution.\n\n H : array-like of shape (n_components, n_features)\n If init='custom', it is used as initial guess for the solution.\n\n Returns\n -------\n W : ndarray of shape (n_samples, n_components)\n Transformed data.\n " }, @@ -70623,7 +70075,7 @@ "qname": "sklearn.decomposition._nmf.NMF.inverse_transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -70636,7 +70088,7 @@ "qname": "sklearn.decomposition._nmf.NMF.inverse_transform.W", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{ndarray, sparse matrix} of shape (n_samples, n_components)", "description": "Transformed data matrix." @@ -70657,7 +70109,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Transform data back to its original space.\n\n.. versionadded:: 0.18", "docstring": "Transform data back to its original space.\n\n .. versionadded:: 0.18\n\n Parameters\n ----------\n W : {ndarray, sparse matrix} of shape (n_samples, n_components)\n Transformed data matrix.\n\n Returns\n -------\n X : {ndarray, sparse matrix} of shape (n_samples, n_features)\n Returns a data matrix of the original shape.\n " }, @@ -70673,7 +70125,7 @@ "qname": "sklearn.decomposition._nmf.NMF.transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -70686,7 +70138,7 @@ "qname": "sklearn.decomposition._nmf.NMF.transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Training vector, where `n_samples` is the number of samples\nand `n_features` is the number of features." 
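The hunks above publish the NMF constructor parameters and its fit / fit_transform / inverse_transform / transform entries. As a reference point only, here is a minimal usage sketch of that documented interface; the random toy matrix and the scikit-learn >= 1.0 assumption (where `alpha_W`/`alpha_H` replace the deprecated `alpha`/`regularization`) are mine, not part of the annotations.

```python
import numpy as np
from sklearn.decomposition import NMF

X = np.abs(np.random.RandomState(0).randn(6, 4))  # NMF requires non-negative input

model = NMF(
    n_components=2,
    init="nndsvda",   # one of the documented init strategies
    alpha_W=0.1,      # regularization on W (scikit-learn >= 1.0)
    alpha_H="same",   # default: reuse alpha_W for H
    l1_ratio=0.5,     # blend of elementwise L1 and L2 penalties
    random_state=0,
)
W = model.fit_transform(X)            # (n_samples, n_components)
H = model.components_                 # (n_components, n_features)
X_approx = model.inverse_transform(W) # approximate reconstruction of X
```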
@@ -70707,7 +70159,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Transform the data X according to the fitted NMF model.", "docstring": "Transform the data X according to the fitted NMF model.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training vector, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n Returns\n -------\n W : ndarray of shape (n_samples, n_components)\n Transformed data.\n " }, @@ -70808,7 +70260,7 @@ "types": [ { "kind": "EnumType", - "values": ["frobenius", "itakura-saito", "kullback-leibler"] + "values": ["itakura-saito", "kullback-leibler", "frobenius"] }, { "kind": "NamedType", @@ -71295,7 +70747,7 @@ "types": [ { "kind": "EnumType", - "values": ["frobenius", "itakura-saito", "kullback-leibler"] + "values": ["itakura-saito", "kullback-leibler", "frobenius"] }, { "kind": "NamedType", @@ -71489,7 +70941,7 @@ }, "type": { "kind": "EnumType", - "values": ["nndsvdar", "nndsvda", "nndsvd", "random"] + "values": ["nndsvd", "nndsvda", "random", "nndsvdar"] } }, { @@ -72077,7 +71529,7 @@ }, "type": { "kind": "EnumType", - "values": ["nndsvda", "nndsvd", "custom", "nndsvdar", "random"] + "values": ["random", "custom", "nndsvda", "nndsvd", "nndsvdar"] } }, { @@ -72128,7 +71580,7 @@ "types": [ { "kind": "EnumType", - "values": ["frobenius", "itakura-saito", "kullback-leibler"] + "values": ["itakura-saito", "kullback-leibler", "frobenius"] }, { "kind": "NamedType", @@ -72409,7 +71861,7 @@ "qname": "sklearn.decomposition._pca.PCA.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -72422,7 +71874,7 @@ "qname": "sklearn.decomposition._pca.PCA.__init__.n_components", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, float or 'mle', default=None", "description": "Number of components to keep.\nif n_components is not set all components are kept::\n\n n_components == min(n_samples, n_features)\n\nIf ``n_components == 'mle'`` and ``svd_solver == 'full'``, Minka's\nMLE is used to guess the dimension. Use of ``n_components == 'mle'``\nwill interpret ``svd_solver == 'auto'`` as ``svd_solver == 'full'``.\n\nIf ``0 < n_components < 1`` and ``svd_solver == 'full'``, select the\nnumber of components such that the amount of variance that needs to be\nexplained is greater than the percentage specified by n_components.\n\nIf ``svd_solver == 'arpack'``, the number of components must be\nstrictly less than the minimum of n_features and n_samples.\n\nHence, the None case results in::\n\n n_components == min(n_samples, n_features) - 1" @@ -72451,7 +71903,7 @@ "qname": "sklearn.decomposition._pca.PCA.__init__.copy", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "If False, data passed to fit are overwritten and running\nfit(X).transform(X) will not yield the expected results,\nuse fit_transform(X) instead." 
@@ -72467,7 +71919,7 @@ "qname": "sklearn.decomposition._pca.PCA.__init__.whiten", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "When True (False by default) the `components_` vectors are multiplied\nby the square root of n_samples and then divided by the singular values\nto ensure uncorrelated outputs with unit component-wise variances.\n\nWhitening will remove some information from the transformed signal\n(the relative variance scales of the components) but can sometime\nimprove the predictive accuracy of the downstream estimators by\nmaking their data respect some hard-wired assumptions." @@ -72483,14 +71935,14 @@ "qname": "sklearn.decomposition._pca.PCA.__init__.svd_solver", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'auto', 'full', 'arpack', 'randomized'}, default='auto'", "description": "If auto :\n The solver is selected by a default policy based on `X.shape` and\n `n_components`: if the input data is larger than 500x500 and the\n number of components to extract is lower than 80% of the smallest\n dimension of the data, then the more efficient 'randomized'\n method is enabled. Otherwise the exact full SVD is computed and\n optionally truncated afterwards.\nIf full :\n run exact full SVD calling the standard LAPACK solver via\n `scipy.linalg.svd` and select the components by postprocessing\nIf arpack :\n run SVD truncated to n_components calling ARPACK solver via\n `scipy.sparse.linalg.svds`. It requires strictly\n 0 < n_components < min(X.shape)\nIf randomized :\n run randomized SVD by the method of Halko et al.\n\n.. versionadded:: 0.18.0" }, "type": { "kind": "EnumType", - "values": ["auto", "arpack", "full", "randomized"] + "values": ["arpack", "auto", "randomized", "full"] } }, { @@ -72499,7 +71951,7 @@ "qname": "sklearn.decomposition._pca.PCA.__init__.tol", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.0", "description": "Tolerance for singular values computed by svd_solver == 'arpack'.\nMust be of range [0.0, infinity).\n\n.. versionadded:: 0.18.0" @@ -72528,7 +71980,7 @@ "qname": "sklearn.decomposition._pca.PCA.__init__.iterated_power", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or 'auto', default='auto'", "description": "Number of iterations for the power method computed by\nsvd_solver == 'randomized'.\nMust be of range [0, infinity).\n\n.. versionadded:: 0.18.0" @@ -72561,7 +72013,7 @@ "qname": "sklearn.decomposition._pca.PCA.__init__.n_oversamples", "default_value": "10", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=10", "description": "This parameter is only relevant when `svd_solver=\"randomized\"`.\nIt corresponds to the additional number of random vectors to sample the\nrange of `X` so as to ensure proper conditioning. See\n:func:`~sklearn.utils.extmath.randomized_svd` for more details.\n\n.. 
versionadded:: 1.1" @@ -72577,7 +72029,7 @@ "qname": "sklearn.decomposition._pca.PCA.__init__.power_iteration_normalizer", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{\u2018auto\u2019, \u2018QR\u2019, \u2018LU\u2019, \u2018none\u2019}, default=\u2019auto\u2019", "description": "Power iteration normalizer for randomized SVD solver.\nNot used by ARPACK. See :func:`~sklearn.utils.extmath.randomized_svd`\nfor more details.\n\n.. versionadded:: 1.1" @@ -72593,7 +72045,7 @@ "qname": "sklearn.decomposition._pca.PCA.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Used when the 'arpack' or 'randomized' solvers are used. Pass an int\nfor reproducible results across multiple function calls.\nSee :term:`Glossary `.\n\n.. versionadded:: 0.18.0" @@ -72618,7 +72070,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -72812,7 +72264,7 @@ "qname": "sklearn.decomposition._pca.PCA.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -72825,7 +72277,7 @@ "qname": "sklearn.decomposition._pca.PCA.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Training data, where `n_samples` is the number of samples\nand `n_features` is the number of features." @@ -72841,7 +72293,7 @@ "qname": "sklearn.decomposition._pca.PCA.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Ignored." @@ -72853,7 +72305,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the model with X.", "docstring": "Fit the model with X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n y : Ignored\n Ignored.\n\n Returns\n -------\n self : object\n Returns the instance itself.\n " }, @@ -72869,7 +72321,7 @@ "qname": "sklearn.decomposition._pca.PCA.fit_transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -72882,7 +72334,7 @@ "qname": "sklearn.decomposition._pca.PCA.fit_transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Training data, where `n_samples` is the number of samples\nand `n_features` is the number of features." @@ -72898,7 +72350,7 @@ "qname": "sklearn.decomposition._pca.PCA.fit_transform.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Ignored." 
@@ -72910,7 +72362,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the model with X and apply the dimensionality reduction on X.", "docstring": "Fit the model with X and apply the dimensionality reduction on X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n y : Ignored\n Ignored.\n\n Returns\n -------\n X_new : ndarray of shape (n_samples, n_components)\n Transformed values.\n\n Notes\n -----\n This method returns a Fortran-ordered array. To convert it to a\n C-ordered array, use 'np.ascontiguousarray'.\n " }, @@ -72926,7 +72378,7 @@ "qname": "sklearn.decomposition._pca.PCA.score.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -72939,7 +72391,7 @@ "qname": "sklearn.decomposition._pca.PCA.score.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "The data." @@ -72955,7 +72407,7 @@ "qname": "sklearn.decomposition._pca.PCA.score.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Ignored." @@ -72967,7 +72419,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Return the average log-likelihood of all samples.\n\nSee. \"Pattern Recognition and Machine Learning\"\nby C. Bishop, 12.2.1 p. 574\nor http://www.miketipping.com/papers/met-mppca.pdf", "docstring": "Return the average log-likelihood of all samples.\n\n See. \"Pattern Recognition and Machine Learning\"\n by C. Bishop, 12.2.1 p. 574\n or http://www.miketipping.com/papers/met-mppca.pdf\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The data.\n\n y : Ignored\n Ignored.\n\n Returns\n -------\n ll : float\n Average log-likelihood of the samples under the current model.\n " }, @@ -72983,7 +72435,7 @@ "qname": "sklearn.decomposition._pca.PCA.score_samples.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -72996,7 +72448,7 @@ "qname": "sklearn.decomposition._pca.PCA.score_samples.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "The data." @@ -73008,7 +72460,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Return the log-likelihood of each sample.\n\nSee. \"Pattern Recognition and Machine Learning\"\nby C. Bishop, 12.2.1 p. 574\nor http://www.miketipping.com/papers/met-mppca.pdf", "docstring": "Return the log-likelihood of each sample.\n\n See. \"Pattern Recognition and Machine Learning\"\n by C. Bishop, 12.2.1 p. 
574\n or http://www.miketipping.com/papers/met-mppca.pdf\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The data.\n\n Returns\n -------\n ll : ndarray of shape (n_samples,)\n Log-likelihood of each sample under the current model.\n " }, @@ -73122,7 +72574,7 @@ "qname": "sklearn.decomposition._sparse_pca.MiniBatchSparsePCA.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -73135,7 +72587,7 @@ "qname": "sklearn.decomposition._sparse_pca.MiniBatchSparsePCA.__init__.n_components", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Number of sparse atoms to extract. If None, then ``n_components``\nis set to ``n_features``." @@ -73151,7 +72603,7 @@ "qname": "sklearn.decomposition._sparse_pca.MiniBatchSparsePCA.__init__.alpha", "default_value": "1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=1", "description": "Sparsity controlling parameter. Higher values lead to sparser\ncomponents." @@ -73167,7 +72619,7 @@ "qname": "sklearn.decomposition._sparse_pca.MiniBatchSparsePCA.__init__.ridge_alpha", "default_value": "0.01", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.01", "description": "Amount of ridge shrinkage to apply in order to improve\nconditioning when calling the transform method." @@ -73183,7 +72635,7 @@ "qname": "sklearn.decomposition._sparse_pca.MiniBatchSparsePCA.__init__.n_iter", "default_value": "100", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=100", "description": "Number of iterations to perform for each mini batch." @@ -73199,7 +72651,7 @@ "qname": "sklearn.decomposition._sparse_pca.MiniBatchSparsePCA.__init__.callback", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "callable, default=None", "description": "Callable that gets invoked every five iterations." @@ -73215,7 +72667,7 @@ "qname": "sklearn.decomposition._sparse_pca.MiniBatchSparsePCA.__init__.batch_size", "default_value": "3", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=3", "description": "The number of features to take in each mini batch." @@ -73231,7 +72683,7 @@ "qname": "sklearn.decomposition._sparse_pca.MiniBatchSparsePCA.__init__.verbose", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or bool, default=False", "description": "Controls the verbosity; the higher, the more messages. Defaults to 0." @@ -73256,7 +72708,7 @@ "qname": "sklearn.decomposition._sparse_pca.MiniBatchSparsePCA.__init__.shuffle", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether to shuffle the data before splitting it in batches." 
@@ -73272,7 +72724,7 @@ "qname": "sklearn.decomposition._sparse_pca.MiniBatchSparsePCA.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Number of parallel jobs to run.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details." @@ -73288,14 +72740,14 @@ "qname": "sklearn.decomposition._sparse_pca.MiniBatchSparsePCA.__init__.method", "default_value": "'lars'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'lars', 'cd'}, default='lars'", "description": "Method to be used for optimization.\nlars: uses the least angle regression method to solve the lasso problem\n(linear_model.lars_path)\ncd: uses the coordinate descent method to compute the\nLasso solution (linear_model.Lasso). Lars will be faster if\nthe estimated components are sparse." }, "type": { "kind": "EnumType", - "values": ["cd", "lars"] + "values": ["lars", "cd"] } }, { @@ -73304,7 +72756,7 @@ "qname": "sklearn.decomposition._sparse_pca.MiniBatchSparsePCA.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Used for random shuffling when ``shuffle`` is set to ``True``,\nduring online dictionary learning. Pass an int for reproducible results\nacross multiple function calls.\nSee :term:`Glossary `." @@ -73329,7 +72781,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -73345,7 +72797,7 @@ "qname": "sklearn.decomposition._sparse_pca.MiniBatchSparsePCA.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -73358,7 +72810,7 @@ "qname": "sklearn.decomposition._sparse_pca.MiniBatchSparsePCA.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Training vector, where `n_samples` is the number of samples\nand `n_features` is the number of features." @@ -73374,7 +72826,7 @@ "qname": "sklearn.decomposition._sparse_pca.MiniBatchSparsePCA.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present for API consistency by convention." 
@@ -73386,7 +72838,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the model from data in X.", "docstring": "Fit the model from data in X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training vector, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n self : object\n Returns the instance itself.\n " }, @@ -73402,7 +72854,7 @@ "qname": "sklearn.decomposition._sparse_pca.SparsePCA.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -73415,7 +72867,7 @@ "qname": "sklearn.decomposition._sparse_pca.SparsePCA.__init__.n_components", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Number of sparse atoms to extract. If None, then ``n_components``\nis set to ``n_features``." @@ -73431,7 +72883,7 @@ "qname": "sklearn.decomposition._sparse_pca.SparsePCA.__init__.alpha", "default_value": "1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1", "description": "Sparsity controlling parameter. Higher values lead to sparser\ncomponents." @@ -73447,7 +72899,7 @@ "qname": "sklearn.decomposition._sparse_pca.SparsePCA.__init__.ridge_alpha", "default_value": "0.01", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.01", "description": "Amount of ridge shrinkage to apply in order to improve\nconditioning when calling the transform method." @@ -73463,7 +72915,7 @@ "qname": "sklearn.decomposition._sparse_pca.SparsePCA.__init__.max_iter", "default_value": "1000", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=1000", "description": "Maximum number of iterations to perform." @@ -73479,7 +72931,7 @@ "qname": "sklearn.decomposition._sparse_pca.SparsePCA.__init__.tol", "default_value": "1e-08", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-8", "description": "Tolerance for the stopping condition." @@ -73495,14 +72947,14 @@ "qname": "sklearn.decomposition._sparse_pca.SparsePCA.__init__.method", "default_value": "'lars'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'lars', 'cd'}, default='lars'", "description": "Method to be used for optimization.\nlars: uses the least angle regression method to solve the lasso problem\n(linear_model.lars_path)\ncd: uses the coordinate descent method to compute the\nLasso solution (linear_model.Lasso). Lars will be faster if\nthe estimated components are sparse." }, "type": { "kind": "EnumType", - "values": ["cd", "lars"] + "values": ["lars", "cd"] } }, { @@ -73511,7 +72963,7 @@ "qname": "sklearn.decomposition._sparse_pca.SparsePCA.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Number of parallel jobs to run.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details." 
@@ -73527,7 +72979,7 @@ "qname": "sklearn.decomposition._sparse_pca.SparsePCA.__init__.U_init", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray of shape (n_samples, n_components), default=None", "description": "Initial values for the loadings for warm restart scenarios. Only used\nif `U_init` and `V_init` are not None." @@ -73543,7 +72995,7 @@ "qname": "sklearn.decomposition._sparse_pca.SparsePCA.__init__.V_init", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray of shape (n_components, n_features), default=None", "description": "Initial values for the components for warm restart scenarios. Only used\nif `U_init` and `V_init` are not None." @@ -73559,7 +73011,7 @@ "qname": "sklearn.decomposition._sparse_pca.SparsePCA.__init__.verbose", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or bool, default=False", "description": "Controls the verbosity; the higher, the more messages. Defaults to 0." @@ -73584,7 +73036,7 @@ "qname": "sklearn.decomposition._sparse_pca.SparsePCA.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Used during dictionary learning. Pass an int for reproducible results\nacross multiple function calls.\nSee :term:`Glossary `." @@ -73609,7 +73061,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -73675,7 +73127,7 @@ "qname": "sklearn.decomposition._sparse_pca.SparsePCA.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -73688,7 +73140,7 @@ "qname": "sklearn.decomposition._sparse_pca.SparsePCA.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Training vector, where `n_samples` is the number of samples\nand `n_features` is the number of features." @@ -73704,7 +73156,7 @@ "qname": "sklearn.decomposition._sparse_pca.SparsePCA.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present here for API consistency by convention." 
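For the two sparse-PCA estimators annotated above, a short sketch of the documented constructor parameters (`n_components`, `alpha`, `ridge_alpha`, `method`, `batch_size`, ...); the toy data and the `components_` attribute are assumptions drawn from the wider scikit-learn API.

```python
import numpy as np
from sklearn.decomposition import MiniBatchSparsePCA, SparsePCA

X = np.random.RandomState(0).randn(50, 10)

spca = SparsePCA(n_components=3, alpha=1, ridge_alpha=0.01,
                 method="cd", random_state=0).fit(X)
mbspca = MiniBatchSparsePCA(n_components=3, alpha=1, batch_size=3,
                            method="lars", random_state=0).fit(X)

# transform() applies a ridge-regularised least-squares projection onto the sparse components.
print(spca.transform(X).shape, mbspca.components_.shape)
```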
@@ -73716,7 +73168,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the model from data in X.", "docstring": "Fit the model from data in X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training vector, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n y : Ignored\n Not used, present here for API consistency by convention.\n\n Returns\n -------\n self : object\n Returns the instance itself.\n " }, @@ -73732,7 +73184,7 @@ "qname": "sklearn.decomposition._sparse_pca.SparsePCA.transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -73745,7 +73197,7 @@ "qname": "sklearn.decomposition._sparse_pca.SparsePCA.transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray of shape (n_samples, n_features)", "description": "Test data to be transformed, must have the same number of\nfeatures as the data used to train the model." @@ -73757,7 +73209,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Least Squares projection of the data onto the sparse components.\n\nTo avoid instability issues in case the system is under-determined,\nregularization can be applied (Ridge regression) via the\n`ridge_alpha` parameter.\n\nNote that Sparse PCA components orthogonality is not enforced as in PCA\nhence one cannot use a simple linear projection.", "docstring": "Least Squares projection of the data onto the sparse components.\n\n To avoid instability issues in case the system is under-determined,\n regularization can be applied (Ridge regression) via the\n `ridge_alpha` parameter.\n\n Note that Sparse PCA components orthogonality is not enforced as in PCA\n hence one cannot use a simple linear projection.\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Test data to be transformed, must have the same number of\n features as the data used to train the model.\n\n Returns\n -------\n X_new : ndarray of shape (n_samples, n_components)\n Transformed data.\n " }, @@ -73773,7 +73225,7 @@ "qname": "sklearn.decomposition._truncated_svd.TruncatedSVD.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -73786,7 +73238,7 @@ "qname": "sklearn.decomposition._truncated_svd.TruncatedSVD.__init__.n_components", "default_value": "2", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=2", "description": "Desired dimensionality of output data.\nIf algorithm='arpack', must be strictly less than the number of features.\nIf algorithm='randomized', must be less than or equal to the number of features.\nThe default value is useful for visualisation. For LSA, a value of\n100 is recommended." @@ -73802,7 +73254,7 @@ "qname": "sklearn.decomposition._truncated_svd.TruncatedSVD.__init__.algorithm", "default_value": "'randomized'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'arpack', 'randomized'}, default='randomized'", "description": "SVD solver to use. Either \"arpack\" for the ARPACK wrapper in SciPy\n(scipy.sparse.linalg.svds), or \"randomized\" for the randomized\nalgorithm due to Halko (2009)." 
@@ -73818,7 +73270,7 @@ "qname": "sklearn.decomposition._truncated_svd.TruncatedSVD.__init__.n_iter", "default_value": "5", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=5", "description": "Number of iterations for randomized SVD solver. Not used by ARPACK. The\ndefault is larger than the default in\n:func:`~sklearn.utils.extmath.randomized_svd` to handle sparse\nmatrices that may have large slowly decaying spectrum." @@ -73834,7 +73286,7 @@ "qname": "sklearn.decomposition._truncated_svd.TruncatedSVD.__init__.n_oversamples", "default_value": "10", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=10", "description": "Number of oversamples for randomized SVD solver. Not used by ARPACK.\nSee :func:`~sklearn.utils.extmath.randomized_svd` for a complete\ndescription.\n\n.. versionadded:: 1.1" @@ -73850,7 +73302,7 @@ "qname": "sklearn.decomposition._truncated_svd.TruncatedSVD.__init__.power_iteration_normalizer", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{\u2018auto\u2019, \u2018QR\u2019, \u2018LU\u2019, \u2018none\u2019}, default=\u2019auto\u2019", "description": "Power iteration normalizer for randomized SVD solver.\nNot used by ARPACK. See :func:`~sklearn.utils.extmath.randomized_svd`\nfor more details.\n\n.. versionadded:: 1.1" @@ -73866,7 +73318,7 @@ "qname": "sklearn.decomposition._truncated_svd.TruncatedSVD.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Used during randomized svd. Pass an int for reproducible results across\nmultiple function calls.\nSee :term:`Glossary `." @@ -73895,7 +73347,7 @@ "qname": "sklearn.decomposition._truncated_svd.TruncatedSVD.__init__.tol", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.0", "description": "Tolerance for ARPACK. 0 means machine precision. Ignored by randomized\nSVD solver." @@ -73907,7 +73359,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -73973,7 +73425,7 @@ "qname": "sklearn.decomposition._truncated_svd.TruncatedSVD.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -73986,7 +73438,7 @@ "qname": "sklearn.decomposition._truncated_svd.TruncatedSVD.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Training data." @@ -74011,7 +73463,7 @@ "qname": "sklearn.decomposition._truncated_svd.TruncatedSVD.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present here for API consistency by convention." 
@@ -74023,7 +73475,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit model on training data X.", "docstring": "Fit model on training data X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training data.\n\n y : Ignored\n Not used, present here for API consistency by convention.\n\n Returns\n -------\n self : object\n Returns the transformer object.\n " }, @@ -74039,7 +73491,7 @@ "qname": "sklearn.decomposition._truncated_svd.TruncatedSVD.fit_transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -74052,7 +73504,7 @@ "qname": "sklearn.decomposition._truncated_svd.TruncatedSVD.fit_transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Training data." @@ -74077,7 +73529,7 @@ "qname": "sklearn.decomposition._truncated_svd.TruncatedSVD.fit_transform.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present here for API consistency by convention." @@ -74089,7 +73541,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit model to X and perform dimensionality reduction on X.", "docstring": "Fit model to X and perform dimensionality reduction on X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training data.\n\n y : Ignored\n Not used, present here for API consistency by convention.\n\n Returns\n -------\n X_new : ndarray of shape (n_samples, n_components)\n Reduced version of X. This will always be a dense array.\n " }, @@ -74105,7 +73557,7 @@ "qname": "sklearn.decomposition._truncated_svd.TruncatedSVD.inverse_transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -74118,7 +73570,7 @@ "qname": "sklearn.decomposition._truncated_svd.TruncatedSVD.inverse_transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_components)", "description": "New data." @@ -74130,7 +73582,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Transform X back to its original space.\n\nReturns an array X_original whose transform would be X.", "docstring": "Transform X back to its original space.\n\n Returns an array X_original whose transform would be X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_components)\n New data.\n\n Returns\n -------\n X_original : ndarray of shape (n_samples, n_features)\n Note that this is always a dense array.\n " }, @@ -74146,7 +73598,7 @@ "qname": "sklearn.decomposition._truncated_svd.TruncatedSVD.transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -74159,7 +73611,7 @@ "qname": "sklearn.decomposition._truncated_svd.TruncatedSVD.transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "New data." 
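TruncatedSVD, whose constructor, fit, fit_transform, inverse_transform and transform entries are published above, accepts sparse input directly, which is its main use case (e.g. LSA). A sketch under that assumption, using a random SciPy sparse matrix of my own choosing:

```python
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD

X = sp.random(100, 50, density=0.1, random_state=0, format="csr")

svd = TruncatedSVD(n_components=5, algorithm="randomized", n_iter=5, random_state=0)
X_reduced = svd.fit_transform(X)          # always a dense (100, 5) array
X_back = svd.inverse_transform(X_reduced)
print(X_reduced.shape, X_back.shape)
```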
@@ -74180,7 +73632,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Perform dimensionality reduction on X.", "docstring": "Perform dimensionality reduction on X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n New data.\n\n Returns\n -------\n X_new : ndarray of shape (n_samples, n_components)\n Reduced version of X. This will always be a dense array.\n " }, @@ -74254,7 +73706,7 @@ }, "type": { "kind": "EnumType", - "values": ["lsqr", "svd", "eigen"] + "values": ["svd", "lsqr", "eigen"] } }, { @@ -75426,7 +74878,7 @@ }, "type": { "kind": "EnumType", - "values": ["uniform", "stratified", "constant", "prior", "most_frequent"] + "values": ["constant", "uniform", "most_frequent", "stratified", "prior"] } }, { @@ -75856,7 +75308,7 @@ }, "type": { "kind": "EnumType", - "values": ["median", "mean", "constant", "quantile"] + "values": ["mean", "constant", "quantile", "median"] } }, { @@ -76196,7 +75648,7 @@ "qname": "sklearn.ensemble._bagging.BaggingClassifier.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -76209,7 +75661,7 @@ "qname": "sklearn.ensemble._bagging.BaggingClassifier.__init__.base_estimator", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "object, default=None", "description": "The base estimator to fit on random subsets of the dataset.\nIf None, then the base estimator is a\n:class:`~sklearn.tree.DecisionTreeClassifier`." @@ -76225,7 +75677,7 @@ "qname": "sklearn.ensemble._bagging.BaggingClassifier.__init__.n_estimators", "default_value": "10", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=10", "description": "The number of base estimators in the ensemble." @@ -76241,7 +75693,7 @@ "qname": "sklearn.ensemble._bagging.BaggingClassifier.__init__.max_samples", "default_value": "1.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or float, default=1.0", "description": "The number of samples to draw from X to train each base estimator (with\nreplacement by default, see `bootstrap` for more details).\n\n- If int, then draw `max_samples` samples.\n- If float, then draw `max_samples * X.shape[0]` samples." @@ -76266,7 +75718,7 @@ "qname": "sklearn.ensemble._bagging.BaggingClassifier.__init__.max_features", "default_value": "1.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or float, default=1.0", "description": "The number of features to draw from X to train each base estimator (\nwithout replacement by default, see `bootstrap_features` for more\ndetails).\n\n- If int, then draw `max_features` features.\n- If float, then draw `max_features * X.shape[1]` features." @@ -76291,7 +75743,7 @@ "qname": "sklearn.ensemble._bagging.BaggingClassifier.__init__.bootstrap", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether samples are drawn with replacement. If False, sampling\nwithout replacement is performed." 
@@ -76307,7 +75759,7 @@ "qname": "sklearn.ensemble._bagging.BaggingClassifier.__init__.bootstrap_features", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Whether features are drawn with replacement." @@ -76323,7 +75775,7 @@ "qname": "sklearn.ensemble._bagging.BaggingClassifier.__init__.oob_score", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Whether to use out-of-bag samples to estimate\nthe generalization error. Only available if bootstrap=True." @@ -76339,7 +75791,7 @@ "qname": "sklearn.ensemble._bagging.BaggingClassifier.__init__.warm_start", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "When set to True, reuse the solution of the previous call to fit\nand add more estimators to the ensemble, otherwise, just fit\na whole new ensemble. See :term:`the Glossary `.\n\n.. versionadded:: 0.17\n *warm_start* constructor parameter." @@ -76355,7 +75807,7 @@ "qname": "sklearn.ensemble._bagging.BaggingClassifier.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "The number of jobs to run in parallel for both :meth:`fit` and\n:meth:`predict`. ``None`` means 1 unless in a\n:obj:`joblib.parallel_backend` context. ``-1`` means using all\nprocessors. See :term:`Glossary ` for more details." @@ -76371,7 +75823,7 @@ "qname": "sklearn.ensemble._bagging.BaggingClassifier.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Controls the random resampling of the original dataset\n(sample wise and feature wise).\nIf the base estimator accepts a `random_state` attribute, a different\nseed is generated for each instance in the ensemble.\nPass an int for reproducible output across multiple function calls.\nSee :term:`Glossary `." @@ -76400,7 +75852,7 @@ "qname": "sklearn.ensemble._bagging.BaggingClassifier.__init__.verbose", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=0", "description": "Controls the verbosity when fitting and predicting." @@ -76412,7 +75864,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -76542,7 +75994,7 @@ "qname": "sklearn.ensemble._bagging.BaggingClassifier.decision_function.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -76555,7 +76007,7 @@ "qname": "sklearn.ensemble._bagging.BaggingClassifier.decision_function.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The training input samples. Sparse matrices are accepted only if\nthey are supported by the base estimator." 
@@ -76576,7 +76028,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Average of the decision functions of the base classifiers.", "docstring": "Average of the decision functions of the base classifiers.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The training input samples. Sparse matrices are accepted only if\n they are supported by the base estimator.\n\n Returns\n -------\n score : ndarray of shape (n_samples, k)\n The decision function of the input samples. The columns correspond\n to the classes in sorted order, as they appear in the attribute\n ``classes_``. Regression and binary classification are special\n cases with ``k == 1``, otherwise ``k==n_classes``.\n " }, @@ -76592,7 +76044,7 @@ "qname": "sklearn.ensemble._bagging.BaggingClassifier.predict.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -76605,7 +76057,7 @@ "qname": "sklearn.ensemble._bagging.BaggingClassifier.predict.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The training input samples. Sparse matrices are accepted only if\nthey are supported by the base estimator." @@ -76626,7 +76078,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict class for X.\n\nThe predicted class of an input sample is computed as the class with\nthe highest mean predicted probability. If base estimators do not\nimplement a ``predict_proba`` method, then it resorts to voting.", "docstring": "Predict class for X.\n\n The predicted class of an input sample is computed as the class with\n the highest mean predicted probability. If base estimators do not\n implement a ``predict_proba`` method, then it resorts to voting.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The training input samples. Sparse matrices are accepted only if\n they are supported by the base estimator.\n\n Returns\n -------\n y : ndarray of shape (n_samples,)\n The predicted classes.\n " }, @@ -76642,7 +76094,7 @@ "qname": "sklearn.ensemble._bagging.BaggingClassifier.predict_log_proba.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -76655,7 +76107,7 @@ "qname": "sklearn.ensemble._bagging.BaggingClassifier.predict_log_proba.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The training input samples. Sparse matrices are accepted only if\nthey are supported by the base estimator." 
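The BaggingClassifier hunks above cover the resampling controls (`max_samples`, `max_features`, `bootstrap`, `bootstrap_features`, `oob_score`, ...) and the prediction methods. A hedged sketch of how they fit together; `make_classification` and the `oob_score_` attribute come from the wider scikit-learn API, not from these hunks.

```python
from sklearn.datasets import make_classification
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier

X, y = make_classification(n_samples=200, n_features=10, random_state=0)

clf = BaggingClassifier(
    base_estimator=DecisionTreeClassifier(),  # the documented default when None
    n_estimators=20,
    max_samples=0.8,    # float: fraction of rows drawn (with replacement by default)
    max_features=0.5,   # float: fraction of columns drawn (without replacement by default)
    oob_score=True,     # only available because bootstrap=True (the default)
    random_state=0,
).fit(X, y)

print(clf.oob_score_, clf.predict_proba(X[:3]).shape)
```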
@@ -76676,7 +76128,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict class log-probabilities for X.\n\nThe predicted class log-probabilities of an input sample is computed as\nthe log of the mean predicted class probabilities of the base\nestimators in the ensemble.", "docstring": "Predict class log-probabilities for X.\n\n The predicted class log-probabilities of an input sample is computed as\n the log of the mean predicted class probabilities of the base\n estimators in the ensemble.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The training input samples. Sparse matrices are accepted only if\n they are supported by the base estimator.\n\n Returns\n -------\n p : ndarray of shape (n_samples, n_classes)\n The class log-probabilities of the input samples. The order of the\n classes corresponds to that in the attribute :term:`classes_`.\n " }, @@ -76692,7 +76144,7 @@ "qname": "sklearn.ensemble._bagging.BaggingClassifier.predict_proba.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -76705,7 +76157,7 @@ "qname": "sklearn.ensemble._bagging.BaggingClassifier.predict_proba.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The training input samples. Sparse matrices are accepted only if\nthey are supported by the base estimator." @@ -76726,7 +76178,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict class probabilities for X.\n\nThe predicted class probabilities of an input sample is computed as\nthe mean predicted class probabilities of the base estimators in the\nensemble. If base estimators do not implement a ``predict_proba``\nmethod, then it resorts to voting and the predicted class probabilities\nof an input sample represents the proportion of estimators predicting\neach class.", "docstring": "Predict class probabilities for X.\n\n The predicted class probabilities of an input sample is computed as\n the mean predicted class probabilities of the base estimators in the\n ensemble. If base estimators do not implement a ``predict_proba``\n method, then it resorts to voting and the predicted class probabilities\n of an input sample represents the proportion of estimators predicting\n each class.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The training input samples. Sparse matrices are accepted only if\n they are supported by the base estimator.\n\n Returns\n -------\n p : ndarray of shape (n_samples, n_classes)\n The class probabilities of the input samples. 
The order of the\n classes corresponds to that in the attribute :term:`classes_`.\n " }, @@ -76742,7 +76194,7 @@ "qname": "sklearn.ensemble._bagging.BaggingRegressor.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -76755,7 +76207,7 @@ "qname": "sklearn.ensemble._bagging.BaggingRegressor.__init__.base_estimator", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "object, default=None", "description": "The base estimator to fit on random subsets of the dataset.\nIf None, then the base estimator is a\n:class:`~sklearn.tree.DecisionTreeRegressor`." @@ -76771,7 +76223,7 @@ "qname": "sklearn.ensemble._bagging.BaggingRegressor.__init__.n_estimators", "default_value": "10", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=10", "description": "The number of base estimators in the ensemble." @@ -76787,7 +76239,7 @@ "qname": "sklearn.ensemble._bagging.BaggingRegressor.__init__.max_samples", "default_value": "1.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or float, default=1.0", "description": "The number of samples to draw from X to train each base estimator (with\nreplacement by default, see `bootstrap` for more details).\n\n- If int, then draw `max_samples` samples.\n- If float, then draw `max_samples * X.shape[0]` samples." @@ -76812,7 +76264,7 @@ "qname": "sklearn.ensemble._bagging.BaggingRegressor.__init__.max_features", "default_value": "1.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or float, default=1.0", "description": "The number of features to draw from X to train each base estimator (\nwithout replacement by default, see `bootstrap_features` for more\ndetails).\n\n- If int, then draw `max_features` features.\n- If float, then draw `max_features * X.shape[1]` features." @@ -76837,7 +76289,7 @@ "qname": "sklearn.ensemble._bagging.BaggingRegressor.__init__.bootstrap", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether samples are drawn with replacement. If False, sampling\nwithout replacement is performed." @@ -76853,7 +76305,7 @@ "qname": "sklearn.ensemble._bagging.BaggingRegressor.__init__.bootstrap_features", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Whether features are drawn with replacement." @@ -76869,7 +76321,7 @@ "qname": "sklearn.ensemble._bagging.BaggingRegressor.__init__.oob_score", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Whether to use out-of-bag samples to estimate\nthe generalization error. Only available if bootstrap=True." @@ -76885,7 +76337,7 @@ "qname": "sklearn.ensemble._bagging.BaggingRegressor.__init__.warm_start", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "When set to True, reuse the solution of the previous call to fit\nand add more estimators to the ensemble, otherwise, just fit\na whole new ensemble. See :term:`the Glossary `." 
@@ -76901,7 +76353,7 @@ "qname": "sklearn.ensemble._bagging.BaggingRegressor.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "The number of jobs to run in parallel for both :meth:`fit` and\n:meth:`predict`. ``None`` means 1 unless in a\n:obj:`joblib.parallel_backend` context. ``-1`` means using all\nprocessors. See :term:`Glossary ` for more details." @@ -76917,7 +76369,7 @@ "qname": "sklearn.ensemble._bagging.BaggingRegressor.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Controls the random resampling of the original dataset\n(sample wise and feature wise).\nIf the base estimator accepts a `random_state` attribute, a different\nseed is generated for each instance in the ensemble.\nPass an int for reproducible output across multiple function calls.\nSee :term:`Glossary `." @@ -76946,7 +76398,7 @@ "qname": "sklearn.ensemble._bagging.BaggingRegressor.__init__.verbose", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=0", "description": "Controls the verbosity when fitting and predicting." @@ -76958,7 +76410,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -77050,7 +76502,7 @@ "qname": "sklearn.ensemble._bagging.BaggingRegressor.predict.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -77063,7 +76515,7 @@ "qname": "sklearn.ensemble._bagging.BaggingRegressor.predict.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The training input samples. Sparse matrices are accepted only if\nthey are supported by the base estimator." @@ -77084,7 +76536,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict regression target for X.\n\nThe predicted regression target of an input sample is computed as the\nmean predicted regression targets of the estimators in the ensemble.", "docstring": "Predict regression target for X.\n\n The predicted regression target of an input sample is computed as the\n mean predicted regression targets of the estimators in the ensemble.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The training input samples. 
Sparse matrices are accepted only if\n they are supported by the base estimator.\n\n Returns\n -------\n y : ndarray of shape (n_samples,)\n The predicted values.\n " }, @@ -78232,7 +77684,7 @@ "qname": "sklearn.ensemble._base.BaseEnsemble.__getitem__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -78245,7 +77697,7 @@ "qname": "sklearn.ensemble._base.BaseEnsemble.__getitem__.index", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -78254,7 +77706,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Return the index'th estimator in the ensemble.", "docstring": "Return the index'th estimator in the ensemble." }, @@ -78270,7 +77722,7 @@ "qname": "sklearn.ensemble._base.BaseEnsemble.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -78283,7 +77735,7 @@ "qname": "sklearn.ensemble._base.BaseEnsemble.__init__.base_estimator", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "object", "description": "The base estimator from which the ensemble is built." @@ -78299,7 +77751,7 @@ "qname": "sklearn.ensemble._base.BaseEnsemble.__init__.n_estimators", "default_value": "10", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=10", "description": "The number of estimators in the ensemble." @@ -78315,7 +77767,7 @@ "qname": "sklearn.ensemble._base.BaseEnsemble.__init__.estimator_params", "default_value": "tuple()", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "list of str, default=tuple()", "description": "The list of attributes to use as parameters when instantiating a\nnew base estimator. If none are given, default parameters are used." @@ -78327,7 +77779,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -78343,7 +77795,7 @@ "qname": "sklearn.ensemble._base.BaseEnsemble.__iter__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -78352,7 +77804,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Return iterator over estimators in the ensemble.", "docstring": "Return iterator over estimators in the ensemble." }, @@ -78368,7 +77820,7 @@ "qname": "sklearn.ensemble._base.BaseEnsemble.__len__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -78377,7 +77829,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Return the number of estimators in the ensemble.", "docstring": "Return the number of estimators in the ensemble." 
}, @@ -79417,7 +78869,7 @@ "qname": "sklearn.ensemble._forest.ExtraTreesClassifier.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -79430,7 +78882,7 @@ "qname": "sklearn.ensemble._forest.ExtraTreesClassifier.__init__.n_estimators", "default_value": "100", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=100", "description": "The number of trees in the forest.\n\n.. versionchanged:: 0.22\n The default value of ``n_estimators`` changed from 10 to 100\n in 0.22." @@ -79446,14 +78898,14 @@ "qname": "sklearn.ensemble._forest.ExtraTreesClassifier.__init__.criterion", "default_value": "'gini'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{\"gini\", \"entropy\", \"log_loss\"}, default=\"gini\"", "description": "The function to measure the quality of a split. Supported criteria are\n\"gini\" for the Gini impurity and \"log_loss\" and \"entropy\" both for the\nShannon information gain, see :ref:`tree_mathematical_formulation`.\nNote: This parameter is tree-specific." }, "type": { "kind": "EnumType", - "values": ["entropy", "log_loss", "gini"] + "values": ["gini", "entropy", "log_loss"] } }, { @@ -79462,7 +78914,7 @@ "qname": "sklearn.ensemble._forest.ExtraTreesClassifier.__init__.max_depth", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "The maximum depth of the tree. If None, then nodes are expanded until\nall leaves are pure or until all leaves contain less than\nmin_samples_split samples." @@ -79478,7 +78930,7 @@ "qname": "sklearn.ensemble._forest.ExtraTreesClassifier.__init__.min_samples_split", "default_value": "2", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or float, default=2", "description": "The minimum number of samples required to split an internal node:\n\n- If int, then consider `min_samples_split` as the minimum number.\n- If float, then `min_samples_split` is a fraction and\n `ceil(min_samples_split * n_samples)` are the minimum\n number of samples for each split.\n\n.. versionchanged:: 0.18\n Added float values for fractions." @@ -79503,7 +78955,7 @@ "qname": "sklearn.ensemble._forest.ExtraTreesClassifier.__init__.min_samples_leaf", "default_value": "1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or float, default=1", "description": "The minimum number of samples required to be at a leaf node.\nA split point at any depth will only be considered if it leaves at\nleast ``min_samples_leaf`` training samples in each of the left and\nright branches. This may have the effect of smoothing the model,\nespecially in regression.\n\n- If int, then consider `min_samples_leaf` as the minimum number.\n- If float, then `min_samples_leaf` is a fraction and\n `ceil(min_samples_leaf * n_samples)` are the minimum\n number of samples for each node.\n\n.. versionchanged:: 0.18\n Added float values for fractions." 
@@ -79528,7 +78980,7 @@ "qname": "sklearn.ensemble._forest.ExtraTreesClassifier.__init__.min_weight_fraction_leaf", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.0", "description": "The minimum weighted fraction of the sum total of weights (of all\nthe input samples) required to be at a leaf node. Samples have\nequal weight when sample_weight is not provided." @@ -79544,7 +78996,7 @@ "qname": "sklearn.ensemble._forest.ExtraTreesClassifier.__init__.max_features", "default_value": "'sqrt'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{\"sqrt\", \"log2\", None}, int or float, default=\"sqrt\"", "description": "The number of features to consider when looking for the best split:\n\n- If int, then consider `max_features` features at each split.\n- If float, then `max_features` is a fraction and\n `round(max_features * n_features)` features are considered at each\n split.\n- If \"auto\", then `max_features=sqrt(n_features)`.\n- If \"sqrt\", then `max_features=sqrt(n_features)`.\n- If \"log2\", then `max_features=log2(n_features)`.\n- If None, then `max_features=n_features`.\n\n.. versionchanged:: 1.1\n The default of `max_features` changed from `\"auto\"` to `\"sqrt\"`.\n\n.. deprecated:: 1.1\n The `\"auto\"` option was deprecated in 1.1 and will be removed\n in 1.3.\n\nNote: the search for a split does not stop until at least one\nvalid partition of the node samples is found, even if it requires to\neffectively inspect more than ``max_features`` features." @@ -79554,7 +79006,7 @@ "types": [ { "kind": "EnumType", - "values": ["sqrt", "log2"] + "values": ["log2", "sqrt"] }, { "kind": "NamedType", @@ -79573,7 +79025,7 @@ "qname": "sklearn.ensemble._forest.ExtraTreesClassifier.__init__.max_leaf_nodes", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Grow trees with ``max_leaf_nodes`` in best-first fashion.\nBest nodes are defined as relative reduction in impurity.\nIf None then unlimited number of leaf nodes." @@ -79589,7 +79041,7 @@ "qname": "sklearn.ensemble._forest.ExtraTreesClassifier.__init__.min_impurity_decrease", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.0", "description": "A node will be split if this split induces a decrease of the impurity\ngreater than or equal to this value.\n\nThe weighted impurity decrease equation is the following::\n\n N_t / N * (impurity - N_t_R / N_t * right_impurity\n - N_t_L / N_t * left_impurity)\n\nwhere ``N`` is the total number of samples, ``N_t`` is the number of\nsamples at the current node, ``N_t_L`` is the number of samples in the\nleft child, and ``N_t_R`` is the number of samples in the right child.\n\n``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,\nif ``sample_weight`` is passed.\n\n.. versionadded:: 0.19" @@ -79605,7 +79057,7 @@ "qname": "sklearn.ensemble._forest.ExtraTreesClassifier.__init__.bootstrap", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Whether bootstrap samples are used when building trees. If False, the\nwhole dataset is used to build each tree." 
@@ -79621,7 +79073,7 @@ "qname": "sklearn.ensemble._forest.ExtraTreesClassifier.__init__.oob_score", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Whether to use out-of-bag samples to estimate the generalization score.\nOnly available if bootstrap=True." @@ -79637,7 +79089,7 @@ "qname": "sklearn.ensemble._forest.ExtraTreesClassifier.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "The number of jobs to run in parallel. :meth:`fit`, :meth:`predict`,\n:meth:`decision_path` and :meth:`apply` are all parallelized over the\ntrees. ``None`` means 1 unless in a :obj:`joblib.parallel_backend`\ncontext. ``-1`` means using all processors. See :term:`Glossary\n` for more details." @@ -79653,7 +79105,7 @@ "qname": "sklearn.ensemble._forest.ExtraTreesClassifier.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Controls 3 sources of randomness:\n\n- the bootstrapping of the samples used when building trees\n (if ``bootstrap=True``)\n- the sampling of the features to consider when looking for the best\n split at each node (if ``max_features < n_features``)\n- the draw of the splits for each of the `max_features`\n\nSee :term:`Glossary ` for details." @@ -79682,7 +79134,7 @@ "qname": "sklearn.ensemble._forest.ExtraTreesClassifier.__init__.verbose", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=0", "description": "Controls the verbosity when fitting and predicting." @@ -79698,7 +79150,7 @@ "qname": "sklearn.ensemble._forest.ExtraTreesClassifier.__init__.warm_start", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "When set to ``True``, reuse the solution of the previous call to fit\nand add more estimators to the ensemble, otherwise, just fit a whole\nnew forest. See :term:`the Glossary `." @@ -79714,7 +79166,7 @@ "qname": "sklearn.ensemble._forest.ExtraTreesClassifier.__init__.class_weight", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{\"balanced\", \"balanced_subsample\"}, dict or list of dicts, default=None", "description": "Weights associated with classes in the form ``{class_label: weight}``.\nIf not given, all classes are supposed to have weight one. For\nmulti-output problems, a list of dicts can be provided in the same\norder as the columns of y.\n\nNote that for multioutput (including multilabel) weights should be\ndefined for each class of every column in its own dict. 
For example,\nfor four-class multilabel classification weights should be\n[{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of\n[{1:1}, {2:5}, {3:1}, {4:1}].\n\nThe \"balanced\" mode uses the values of y to automatically adjust\nweights inversely proportional to class frequencies in the input data\nas ``n_samples / (n_classes * np.bincount(y))``\n\nThe \"balanced_subsample\" mode is the same as \"balanced\" except that\nweights are computed based on the bootstrap sample for every tree\ngrown.\n\nFor multi-output, the weights of each column of y will be multiplied.\n\nNote that these weights will be multiplied with sample_weight (passed\nthrough the fit method) if sample_weight is specified." @@ -79743,7 +79195,7 @@ "qname": "sklearn.ensemble._forest.ExtraTreesClassifier.__init__.ccp_alpha", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "non-negative float, default=0.0", "description": "Complexity parameter used for Minimal Cost-Complexity Pruning. The\nsubtree with the largest cost complexity that is smaller than\n``ccp_alpha`` will be chosen. By default, no pruning is performed. See\n:ref:`minimal_cost_complexity_pruning` for details.\n\n.. versionadded:: 0.22" @@ -79759,7 +79211,7 @@ "qname": "sklearn.ensemble._forest.ExtraTreesClassifier.__init__.max_samples", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or float, default=None", "description": "If bootstrap is True, the number of samples to draw from X\nto train each base estimator.\n\n- If None (default), then draw `X.shape[0]` samples.\n- If int, then draw `max_samples` samples.\n- If float, then draw `max_samples * X.shape[0]` samples. Thus,\n `max_samples` should be in the interval `(0.0, 1.0]`.\n\n.. versionadded:: 0.22" @@ -79788,7 +79240,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -79804,7 +79256,7 @@ "qname": "sklearn.ensemble._forest.ExtraTreesRegressor.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -79817,7 +79269,7 @@ "qname": "sklearn.ensemble._forest.ExtraTreesRegressor.__init__.n_estimators", "default_value": "100", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=100", "description": "The number of trees in the forest.\n\n.. versionchanged:: 0.22\n The default value of ``n_estimators`` changed from 10 to 100\n in 0.22." @@ -79833,7 +79285,7 @@ "qname": "sklearn.ensemble._forest.ExtraTreesRegressor.__init__.criterion", "default_value": "'squared_error'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{\"squared_error\", \"absolute_error\"}, default=\"squared_error\"", "description": "The function to measure the quality of a split. Supported criteria\nare \"squared_error\" for the mean squared error, which is equal to\nvariance reduction as feature selection criterion, and \"absolute_error\"\nfor the mean absolute error.\n\n.. versionadded:: 0.18\n Mean Absolute Error (MAE) criterion.\n\n.. deprecated:: 1.0\n Criterion \"mse\" was deprecated in v1.0 and will be removed in\n version 1.2. Use `criterion=\"squared_error\"` which is equivalent.\n\n.. deprecated:: 1.0\n Criterion \"mae\" was deprecated in v1.0 and will be removed in\n version 1.2. 
Use `criterion=\"absolute_error\"` which is equivalent." @@ -79849,7 +79301,7 @@ "qname": "sklearn.ensemble._forest.ExtraTreesRegressor.__init__.max_depth", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "The maximum depth of the tree. If None, then nodes are expanded until\nall leaves are pure or until all leaves contain less than\nmin_samples_split samples." @@ -79865,7 +79317,7 @@ "qname": "sklearn.ensemble._forest.ExtraTreesRegressor.__init__.min_samples_split", "default_value": "2", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or float, default=2", "description": "The minimum number of samples required to split an internal node:\n\n- If int, then consider `min_samples_split` as the minimum number.\n- If float, then `min_samples_split` is a fraction and\n `ceil(min_samples_split * n_samples)` are the minimum\n number of samples for each split.\n\n.. versionchanged:: 0.18\n Added float values for fractions." @@ -79890,7 +79342,7 @@ "qname": "sklearn.ensemble._forest.ExtraTreesRegressor.__init__.min_samples_leaf", "default_value": "1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or float, default=1", "description": "The minimum number of samples required to be at a leaf node.\nA split point at any depth will only be considered if it leaves at\nleast ``min_samples_leaf`` training samples in each of the left and\nright branches. This may have the effect of smoothing the model,\nespecially in regression.\n\n- If int, then consider `min_samples_leaf` as the minimum number.\n- If float, then `min_samples_leaf` is a fraction and\n `ceil(min_samples_leaf * n_samples)` are the minimum\n number of samples for each node.\n\n.. versionchanged:: 0.18\n Added float values for fractions." @@ -79915,7 +79367,7 @@ "qname": "sklearn.ensemble._forest.ExtraTreesRegressor.__init__.min_weight_fraction_leaf", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.0", "description": "The minimum weighted fraction of the sum total of weights (of all\nthe input samples) required to be at a leaf node. Samples have\nequal weight when sample_weight is not provided." @@ -79931,7 +79383,7 @@ "qname": "sklearn.ensemble._forest.ExtraTreesRegressor.__init__.max_features", "default_value": "1.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{\"sqrt\", \"log2\", None}, int or float, default=1.0", "description": "The number of features to consider when looking for the best split:\n\n- If int, then consider `max_features` features at each split.\n- If float, then `max_features` is a fraction and\n `round(max_features * n_features)` features are considered at each\n split.\n- If \"auto\", then `max_features=n_features`.\n- If \"sqrt\", then `max_features=sqrt(n_features)`.\n- If \"log2\", then `max_features=log2(n_features)`.\n- If None or 1.0, then `max_features=n_features`.\n\n.. note::\n The default of 1.0 is equivalent to bagged trees and more\n randomness can be achieved by setting smaller values, e.g. 0.3.\n\n.. versionchanged:: 1.1\n The default of `max_features` changed from `\"auto\"` to 1.0.\n\n.. 
deprecated:: 1.1\n The `\"auto\"` option was deprecated in 1.1 and will be removed\n in 1.3.\n\nNote: the search for a split does not stop until at least one\nvalid partition of the node samples is found, even if it requires to\neffectively inspect more than ``max_features`` features." @@ -79941,7 +79393,7 @@ "types": [ { "kind": "EnumType", - "values": ["sqrt", "log2"] + "values": ["log2", "sqrt"] }, { "kind": "NamedType", @@ -79960,7 +79412,7 @@ "qname": "sklearn.ensemble._forest.ExtraTreesRegressor.__init__.max_leaf_nodes", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Grow trees with ``max_leaf_nodes`` in best-first fashion.\nBest nodes are defined as relative reduction in impurity.\nIf None then unlimited number of leaf nodes." @@ -79976,7 +79428,7 @@ "qname": "sklearn.ensemble._forest.ExtraTreesRegressor.__init__.min_impurity_decrease", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.0", "description": "A node will be split if this split induces a decrease of the impurity\ngreater than or equal to this value.\n\nThe weighted impurity decrease equation is the following::\n\n N_t / N * (impurity - N_t_R / N_t * right_impurity\n - N_t_L / N_t * left_impurity)\n\nwhere ``N`` is the total number of samples, ``N_t`` is the number of\nsamples at the current node, ``N_t_L`` is the number of samples in the\nleft child, and ``N_t_R`` is the number of samples in the right child.\n\n``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,\nif ``sample_weight`` is passed.\n\n.. versionadded:: 0.19" @@ -79992,7 +79444,7 @@ "qname": "sklearn.ensemble._forest.ExtraTreesRegressor.__init__.bootstrap", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Whether bootstrap samples are used when building trees. If False, the\nwhole dataset is used to build each tree." @@ -80008,7 +79460,7 @@ "qname": "sklearn.ensemble._forest.ExtraTreesRegressor.__init__.oob_score", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Whether to use out-of-bag samples to estimate the generalization score.\nOnly available if bootstrap=True." @@ -80024,7 +79476,7 @@ "qname": "sklearn.ensemble._forest.ExtraTreesRegressor.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "The number of jobs to run in parallel. :meth:`fit`, :meth:`predict`,\n:meth:`decision_path` and :meth:`apply` are all parallelized over the\ntrees. ``None`` means 1 unless in a :obj:`joblib.parallel_backend`\ncontext. ``-1`` means using all processors. See :term:`Glossary\n` for more details." 
@@ -80040,7 +79492,7 @@ "qname": "sklearn.ensemble._forest.ExtraTreesRegressor.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Controls 3 sources of randomness:\n\n- the bootstrapping of the samples used when building trees\n (if ``bootstrap=True``)\n- the sampling of the features to consider when looking for the best\n split at each node (if ``max_features < n_features``)\n- the draw of the splits for each of the `max_features`\n\nSee :term:`Glossary ` for details." @@ -80069,7 +79521,7 @@ "qname": "sklearn.ensemble._forest.ExtraTreesRegressor.__init__.verbose", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=0", "description": "Controls the verbosity when fitting and predicting." @@ -80085,7 +79537,7 @@ "qname": "sklearn.ensemble._forest.ExtraTreesRegressor.__init__.warm_start", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "When set to ``True``, reuse the solution of the previous call to fit\nand add more estimators to the ensemble, otherwise, just fit a whole\nnew forest. See :term:`the Glossary `." @@ -80101,7 +79553,7 @@ "qname": "sklearn.ensemble._forest.ExtraTreesRegressor.__init__.ccp_alpha", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "non-negative float, default=0.0", "description": "Complexity parameter used for Minimal Cost-Complexity Pruning. The\nsubtree with the largest cost complexity that is smaller than\n``ccp_alpha`` will be chosen. By default, no pruning is performed. See\n:ref:`minimal_cost_complexity_pruning` for details.\n\n.. versionadded:: 0.22" @@ -80117,7 +79569,7 @@ "qname": "sklearn.ensemble._forest.ExtraTreesRegressor.__init__.max_samples", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or float, default=None", "description": "If bootstrap is True, the number of samples to draw from X\nto train each base estimator.\n\n- If None (default), then draw `X.shape[0]` samples.\n- If int, then draw `max_samples` samples.\n- If float, then draw `max_samples * X.shape[0]` samples. Thus,\n `max_samples` should be in the interval `(0.0, 1.0]`.\n\n.. versionadded:: 0.22" @@ -80146,7 +79598,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -81032,7 +80484,7 @@ "qname": "sklearn.ensemble._forest.RandomForestClassifier.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -81045,7 +80497,7 @@ "qname": "sklearn.ensemble._forest.RandomForestClassifier.__init__.n_estimators", "default_value": "100", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=100", "description": "The number of trees in the forest.\n\n.. versionchanged:: 0.22\n The default value of ``n_estimators`` changed from 10 to 100\n in 0.22." 
@@ -81061,14 +80513,14 @@ "qname": "sklearn.ensemble._forest.RandomForestClassifier.__init__.criterion", "default_value": "'gini'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{\"gini\", \"entropy\", \"log_loss\"}, default=\"gini\"", "description": "The function to measure the quality of a split. Supported criteria are\n\"gini\" for the Gini impurity and \"log_loss\" and \"entropy\" both for the\nShannon information gain, see :ref:`tree_mathematical_formulation`.\nNote: This parameter is tree-specific." }, "type": { "kind": "EnumType", - "values": ["entropy", "log_loss", "gini"] + "values": ["gini", "entropy", "log_loss"] } }, { @@ -81077,7 +80529,7 @@ "qname": "sklearn.ensemble._forest.RandomForestClassifier.__init__.max_depth", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "The maximum depth of the tree. If None, then nodes are expanded until\nall leaves are pure or until all leaves contain less than\nmin_samples_split samples." @@ -81093,7 +80545,7 @@ "qname": "sklearn.ensemble._forest.RandomForestClassifier.__init__.min_samples_split", "default_value": "2", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or float, default=2", "description": "The minimum number of samples required to split an internal node:\n\n- If int, then consider `min_samples_split` as the minimum number.\n- If float, then `min_samples_split` is a fraction and\n `ceil(min_samples_split * n_samples)` are the minimum\n number of samples for each split.\n\n.. versionchanged:: 0.18\n Added float values for fractions." @@ -81118,7 +80570,7 @@ "qname": "sklearn.ensemble._forest.RandomForestClassifier.__init__.min_samples_leaf", "default_value": "1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or float, default=1", "description": "The minimum number of samples required to be at a leaf node.\nA split point at any depth will only be considered if it leaves at\nleast ``min_samples_leaf`` training samples in each of the left and\nright branches. This may have the effect of smoothing the model,\nespecially in regression.\n\n- If int, then consider `min_samples_leaf` as the minimum number.\n- If float, then `min_samples_leaf` is a fraction and\n `ceil(min_samples_leaf * n_samples)` are the minimum\n number of samples for each node.\n\n.. versionchanged:: 0.18\n Added float values for fractions." @@ -81143,7 +80595,7 @@ "qname": "sklearn.ensemble._forest.RandomForestClassifier.__init__.min_weight_fraction_leaf", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.0", "description": "The minimum weighted fraction of the sum total of weights (of all\nthe input samples) required to be at a leaf node. Samples have\nequal weight when sample_weight is not provided." 
@@ -81159,7 +80611,7 @@ "qname": "sklearn.ensemble._forest.RandomForestClassifier.__init__.max_features", "default_value": "'sqrt'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{\"sqrt\", \"log2\", None}, int or float, default=\"sqrt\"", "description": "The number of features to consider when looking for the best split:\n\n- If int, then consider `max_features` features at each split.\n- If float, then `max_features` is a fraction and\n `round(max_features * n_features)` features are considered at each\n split.\n- If \"auto\", then `max_features=sqrt(n_features)`.\n- If \"sqrt\", then `max_features=sqrt(n_features)`.\n- If \"log2\", then `max_features=log2(n_features)`.\n- If None, then `max_features=n_features`.\n\n.. versionchanged:: 1.1\n The default of `max_features` changed from `\"auto\"` to `\"sqrt\"`.\n\n.. deprecated:: 1.1\n The `\"auto\"` option was deprecated in 1.1 and will be removed\n in 1.3.\n\nNote: the search for a split does not stop until at least one\nvalid partition of the node samples is found, even if it requires to\neffectively inspect more than ``max_features`` features." @@ -81169,7 +80621,7 @@ "types": [ { "kind": "EnumType", - "values": ["sqrt", "log2"] + "values": ["log2", "sqrt"] }, { "kind": "NamedType", @@ -81188,7 +80640,7 @@ "qname": "sklearn.ensemble._forest.RandomForestClassifier.__init__.max_leaf_nodes", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Grow trees with ``max_leaf_nodes`` in best-first fashion.\nBest nodes are defined as relative reduction in impurity.\nIf None then unlimited number of leaf nodes." @@ -81204,7 +80656,7 @@ "qname": "sklearn.ensemble._forest.RandomForestClassifier.__init__.min_impurity_decrease", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.0", "description": "A node will be split if this split induces a decrease of the impurity\ngreater than or equal to this value.\n\nThe weighted impurity decrease equation is the following::\n\n N_t / N * (impurity - N_t_R / N_t * right_impurity\n - N_t_L / N_t * left_impurity)\n\nwhere ``N`` is the total number of samples, ``N_t`` is the number of\nsamples at the current node, ``N_t_L`` is the number of samples in the\nleft child, and ``N_t_R`` is the number of samples in the right child.\n\n``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,\nif ``sample_weight`` is passed.\n\n.. versionadded:: 0.19" @@ -81220,7 +80672,7 @@ "qname": "sklearn.ensemble._forest.RandomForestClassifier.__init__.bootstrap", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether bootstrap samples are used when building trees. If False, the\nwhole dataset is used to build each tree." @@ -81236,7 +80688,7 @@ "qname": "sklearn.ensemble._forest.RandomForestClassifier.__init__.oob_score", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Whether to use out-of-bag samples to estimate the generalization score.\nOnly available if bootstrap=True." 
@@ -81252,7 +80704,7 @@ "qname": "sklearn.ensemble._forest.RandomForestClassifier.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "The number of jobs to run in parallel. :meth:`fit`, :meth:`predict`,\n:meth:`decision_path` and :meth:`apply` are all parallelized over the\ntrees. ``None`` means 1 unless in a :obj:`joblib.parallel_backend`\ncontext. ``-1`` means using all processors. See :term:`Glossary\n` for more details." @@ -81268,7 +80720,7 @@ "qname": "sklearn.ensemble._forest.RandomForestClassifier.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Controls both the randomness of the bootstrapping of the samples used\nwhen building trees (if ``bootstrap=True``) and the sampling of the\nfeatures to consider when looking for the best split at each node\n(if ``max_features < n_features``).\nSee :term:`Glossary ` for details." @@ -81297,7 +80749,7 @@ "qname": "sklearn.ensemble._forest.RandomForestClassifier.__init__.verbose", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=0", "description": "Controls the verbosity when fitting and predicting." @@ -81313,7 +80765,7 @@ "qname": "sklearn.ensemble._forest.RandomForestClassifier.__init__.warm_start", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "When set to ``True``, reuse the solution of the previous call to fit\nand add more estimators to the ensemble, otherwise, just fit a whole\nnew forest. See :term:`the Glossary `." @@ -81329,7 +80781,7 @@ "qname": "sklearn.ensemble._forest.RandomForestClassifier.__init__.class_weight", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{\"balanced\", \"balanced_subsample\"}, dict or list of dicts, default=None", "description": "Weights associated with classes in the form ``{class_label: weight}``.\nIf not given, all classes are supposed to have weight one. For\nmulti-output problems, a list of dicts can be provided in the same\norder as the columns of y.\n\nNote that for multioutput (including multilabel) weights should be\ndefined for each class of every column in its own dict. For example,\nfor four-class multilabel classification weights should be\n[{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of\n[{1:1}, {2:5}, {3:1}, {4:1}].\n\nThe \"balanced\" mode uses the values of y to automatically adjust\nweights inversely proportional to class frequencies in the input data\nas ``n_samples / (n_classes * np.bincount(y))``\n\nThe \"balanced_subsample\" mode is the same as \"balanced\" except that\nweights are computed based on the bootstrap sample for every tree\ngrown.\n\nFor multi-output, the weights of each column of y will be multiplied.\n\nNote that these weights will be multiplied with sample_weight (passed\nthrough the fit method) if sample_weight is specified." 
@@ -81358,7 +80810,7 @@ "qname": "sklearn.ensemble._forest.RandomForestClassifier.__init__.ccp_alpha", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "non-negative float, default=0.0", "description": "Complexity parameter used for Minimal Cost-Complexity Pruning. The\nsubtree with the largest cost complexity that is smaller than\n``ccp_alpha`` will be chosen. By default, no pruning is performed. See\n:ref:`minimal_cost_complexity_pruning` for details.\n\n.. versionadded:: 0.22" @@ -81374,7 +80826,7 @@ "qname": "sklearn.ensemble._forest.RandomForestClassifier.__init__.max_samples", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or float, default=None", "description": "If bootstrap is True, the number of samples to draw from X\nto train each base estimator.\n\n- If None (default), then draw `X.shape[0]` samples.\n- If int, then draw `max_samples` samples.\n- If float, then draw `max_samples * X.shape[0]` samples. Thus,\n `max_samples` should be in the interval `(0.0, 1.0]`.\n\n.. versionadded:: 0.22" @@ -81403,7 +80855,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -81419,7 +80871,7 @@ "qname": "sklearn.ensemble._forest.RandomForestRegressor.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -81432,7 +80884,7 @@ "qname": "sklearn.ensemble._forest.RandomForestRegressor.__init__.n_estimators", "default_value": "100", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=100", "description": "The number of trees in the forest.\n\n.. versionchanged:: 0.22\n The default value of ``n_estimators`` changed from 10 to 100\n in 0.22." @@ -81448,14 +80900,14 @@ "qname": "sklearn.ensemble._forest.RandomForestRegressor.__init__.criterion", "default_value": "'squared_error'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{\"squared_error\", \"absolute_error\", \"poisson\"}, default=\"squared_error\"", "description": "The function to measure the quality of a split. Supported criteria\nare \"squared_error\" for the mean squared error, which is equal to\nvariance reduction as feature selection criterion, \"absolute_error\"\nfor the mean absolute error, and \"poisson\" which uses reduction in\nPoisson deviance to find splits.\nTraining using \"absolute_error\" is significantly slower\nthan when using \"squared_error\".\n\n.. versionadded:: 0.18\n Mean Absolute Error (MAE) criterion.\n\n.. versionadded:: 1.0\n Poisson criterion.\n\n.. deprecated:: 1.0\n Criterion \"mse\" was deprecated in v1.0 and will be removed in\n version 1.2. Use `criterion=\"squared_error\"` which is equivalent.\n\n.. deprecated:: 1.0\n Criterion \"mae\" was deprecated in v1.0 and will be removed in\n version 1.2. Use `criterion=\"absolute_error\"` which is equivalent." }, "type": { "kind": "EnumType", - "values": ["poisson", "squared_error", "absolute_error"] + "values": ["squared_error", "poisson", "absolute_error"] } }, { @@ -81464,7 +80916,7 @@ "qname": "sklearn.ensemble._forest.RandomForestRegressor.__init__.max_depth", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "The maximum depth of the tree. 
If None, then nodes are expanded until\nall leaves are pure or until all leaves contain less than\nmin_samples_split samples." @@ -81480,7 +80932,7 @@ "qname": "sklearn.ensemble._forest.RandomForestRegressor.__init__.min_samples_split", "default_value": "2", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or float, default=2", "description": "The minimum number of samples required to split an internal node:\n\n- If int, then consider `min_samples_split` as the minimum number.\n- If float, then `min_samples_split` is a fraction and\n `ceil(min_samples_split * n_samples)` are the minimum\n number of samples for each split.\n\n.. versionchanged:: 0.18\n Added float values for fractions." @@ -81505,7 +80957,7 @@ "qname": "sklearn.ensemble._forest.RandomForestRegressor.__init__.min_samples_leaf", "default_value": "1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or float, default=1", "description": "The minimum number of samples required to be at a leaf node.\nA split point at any depth will only be considered if it leaves at\nleast ``min_samples_leaf`` training samples in each of the left and\nright branches. This may have the effect of smoothing the model,\nespecially in regression.\n\n- If int, then consider `min_samples_leaf` as the minimum number.\n- If float, then `min_samples_leaf` is a fraction and\n `ceil(min_samples_leaf * n_samples)` are the minimum\n number of samples for each node.\n\n.. versionchanged:: 0.18\n Added float values for fractions." @@ -81530,7 +80982,7 @@ "qname": "sklearn.ensemble._forest.RandomForestRegressor.__init__.min_weight_fraction_leaf", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.0", "description": "The minimum weighted fraction of the sum total of weights (of all\nthe input samples) required to be at a leaf node. Samples have\nequal weight when sample_weight is not provided." @@ -81546,7 +80998,7 @@ "qname": "sklearn.ensemble._forest.RandomForestRegressor.__init__.max_features", "default_value": "1.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{\"sqrt\", \"log2\", None}, int or float, default=1.0", "description": "The number of features to consider when looking for the best split:\n\n- If int, then consider `max_features` features at each split.\n- If float, then `max_features` is a fraction and\n `round(max_features * n_features)` features are considered at each\n split.\n- If \"auto\", then `max_features=n_features`.\n- If \"sqrt\", then `max_features=sqrt(n_features)`.\n- If \"log2\", then `max_features=log2(n_features)`.\n- If None or 1.0, then `max_features=n_features`.\n\n.. note::\n The default of 1.0 is equivalent to bagged trees and more\n randomness can be achieved by setting smaller values, e.g. 0.3.\n\n.. versionchanged:: 1.1\n The default of `max_features` changed from `\"auto\"` to 1.0.\n\n.. deprecated:: 1.1\n The `\"auto\"` option was deprecated in 1.1 and will be removed\n in 1.3.\n\nNote: the search for a split does not stop until at least one\nvalid partition of the node samples is found, even if it requires to\neffectively inspect more than ``max_features`` features." 
@@ -81556,7 +81008,7 @@ "types": [ { "kind": "EnumType", - "values": ["sqrt", "log2"] + "values": ["log2", "sqrt"] }, { "kind": "NamedType", @@ -81575,7 +81027,7 @@ "qname": "sklearn.ensemble._forest.RandomForestRegressor.__init__.max_leaf_nodes", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Grow trees with ``max_leaf_nodes`` in best-first fashion.\nBest nodes are defined as relative reduction in impurity.\nIf None then unlimited number of leaf nodes." @@ -81591,7 +81043,7 @@ "qname": "sklearn.ensemble._forest.RandomForestRegressor.__init__.min_impurity_decrease", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.0", "description": "A node will be split if this split induces a decrease of the impurity\ngreater than or equal to this value.\n\nThe weighted impurity decrease equation is the following::\n\n N_t / N * (impurity - N_t_R / N_t * right_impurity\n - N_t_L / N_t * left_impurity)\n\nwhere ``N`` is the total number of samples, ``N_t`` is the number of\nsamples at the current node, ``N_t_L`` is the number of samples in the\nleft child, and ``N_t_R`` is the number of samples in the right child.\n\n``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,\nif ``sample_weight`` is passed.\n\n.. versionadded:: 0.19" @@ -81607,7 +81059,7 @@ "qname": "sklearn.ensemble._forest.RandomForestRegressor.__init__.bootstrap", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether bootstrap samples are used when building trees. If False, the\nwhole dataset is used to build each tree." @@ -81623,7 +81075,7 @@ "qname": "sklearn.ensemble._forest.RandomForestRegressor.__init__.oob_score", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Whether to use out-of-bag samples to estimate the generalization score.\nOnly available if bootstrap=True." @@ -81639,7 +81091,7 @@ "qname": "sklearn.ensemble._forest.RandomForestRegressor.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "The number of jobs to run in parallel. :meth:`fit`, :meth:`predict`,\n:meth:`decision_path` and :meth:`apply` are all parallelized over the\ntrees. ``None`` means 1 unless in a :obj:`joblib.parallel_backend`\ncontext. ``-1`` means using all processors. See :term:`Glossary\n` for more details." @@ -81655,7 +81107,7 @@ "qname": "sklearn.ensemble._forest.RandomForestRegressor.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Controls both the randomness of the bootstrapping of the samples used\nwhen building trees (if ``bootstrap=True``) and the sampling of the\nfeatures to consider when looking for the best split at each node\n(if ``max_features < n_features``).\nSee :term:`Glossary ` for details." 
@@ -81684,7 +81136,7 @@ "qname": "sklearn.ensemble._forest.RandomForestRegressor.__init__.verbose", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=0", "description": "Controls the verbosity when fitting and predicting." @@ -81700,7 +81152,7 @@ "qname": "sklearn.ensemble._forest.RandomForestRegressor.__init__.warm_start", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "When set to ``True``, reuse the solution of the previous call to fit\nand add more estimators to the ensemble, otherwise, just fit a whole\nnew forest. See :term:`the Glossary `." @@ -81716,7 +81168,7 @@ "qname": "sklearn.ensemble._forest.RandomForestRegressor.__init__.ccp_alpha", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "non-negative float, default=0.0", "description": "Complexity parameter used for Minimal Cost-Complexity Pruning. The\nsubtree with the largest cost complexity that is smaller than\n``ccp_alpha`` will be chosen. By default, no pruning is performed. See\n:ref:`minimal_cost_complexity_pruning` for details.\n\n.. versionadded:: 0.22" @@ -81732,7 +81184,7 @@ "qname": "sklearn.ensemble._forest.RandomForestRegressor.__init__.max_samples", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or float, default=None", "description": "If bootstrap is True, the number of samples to draw from X\nto train each base estimator.\n\n- If None (default), then draw `X.shape[0]` samples.\n- If int, then draw `max_samples` samples.\n- If float, then draw `max_samples * X.shape[0]` samples. Thus,\n `max_samples` should be in the interval `(0.0, 1.0]`.\n\n.. versionadded:: 0.22" @@ -81761,7 +81213,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -81777,7 +81229,7 @@ "qname": "sklearn.ensemble._forest.RandomTreesEmbedding.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -81790,7 +81242,7 @@ "qname": "sklearn.ensemble._forest.RandomTreesEmbedding.__init__.n_estimators", "default_value": "100", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=100", "description": "Number of trees in the forest.\n\n.. versionchanged:: 0.22\n The default value of ``n_estimators`` changed from 10 to 100\n in 0.22." @@ -81806,7 +81258,7 @@ "qname": "sklearn.ensemble._forest.RandomTreesEmbedding.__init__.max_depth", "default_value": "5", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=5", "description": "The maximum depth of each tree. If None, then nodes are expanded until\nall leaves are pure or until all leaves contain less than\nmin_samples_split samples." 
@@ -81822,7 +81274,7 @@ "qname": "sklearn.ensemble._forest.RandomTreesEmbedding.__init__.min_samples_split", "default_value": "2", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or float, default=2", "description": "The minimum number of samples required to split an internal node:\n\n- If int, then consider `min_samples_split` as the minimum number.\n- If float, then `min_samples_split` is a fraction and\n `ceil(min_samples_split * n_samples)` is the minimum\n number of samples for each split.\n\n.. versionchanged:: 0.18\n Added float values for fractions." @@ -81847,7 +81299,7 @@ "qname": "sklearn.ensemble._forest.RandomTreesEmbedding.__init__.min_samples_leaf", "default_value": "1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or float, default=1", "description": "The minimum number of samples required to be at a leaf node.\nA split point at any depth will only be considered if it leaves at\nleast ``min_samples_leaf`` training samples in each of the left and\nright branches. This may have the effect of smoothing the model,\nespecially in regression.\n\n- If int, then consider `min_samples_leaf` as the minimum number.\n- If float, then `min_samples_leaf` is a fraction and\n `ceil(min_samples_leaf * n_samples)` is the minimum\n number of samples for each node.\n\n.. versionchanged:: 0.18\n Added float values for fractions." @@ -81872,7 +81324,7 @@ "qname": "sklearn.ensemble._forest.RandomTreesEmbedding.__init__.min_weight_fraction_leaf", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.0", "description": "The minimum weighted fraction of the sum total of weights (of all\nthe input samples) required to be at a leaf node. Samples have\nequal weight when sample_weight is not provided." @@ -81888,7 +81340,7 @@ "qname": "sklearn.ensemble._forest.RandomTreesEmbedding.__init__.max_leaf_nodes", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Grow trees with ``max_leaf_nodes`` in best-first fashion.\nBest nodes are defined as relative reduction in impurity.\nIf None then unlimited number of leaf nodes." @@ -81904,7 +81356,7 @@ "qname": "sklearn.ensemble._forest.RandomTreesEmbedding.__init__.min_impurity_decrease", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.0", "description": "A node will be split if this split induces a decrease of the impurity\ngreater than or equal to this value.\n\nThe weighted impurity decrease equation is the following::\n\n N_t / N * (impurity - N_t_R / N_t * right_impurity\n - N_t_L / N_t * left_impurity)\n\nwhere ``N`` is the total number of samples, ``N_t`` is the number of\nsamples at the current node, ``N_t_L`` is the number of samples in the\nleft child, and ``N_t_R`` is the number of samples in the right child.\n\n``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,\nif ``sample_weight`` is passed.\n\n.. 
versionadded:: 0.19" @@ -81920,7 +81372,7 @@ "qname": "sklearn.ensemble._forest.RandomTreesEmbedding.__init__.sparse_output", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether or not to return a sparse CSR matrix, as default behavior,\nor to return a dense array compatible with dense pipeline operators." @@ -81936,7 +81388,7 @@ "qname": "sklearn.ensemble._forest.RandomTreesEmbedding.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "The number of jobs to run in parallel. :meth:`fit`, :meth:`transform`,\n:meth:`decision_path` and :meth:`apply` are all parallelized over the\ntrees. ``None`` means 1 unless in a :obj:`joblib.parallel_backend`\ncontext. ``-1`` means using all processors. See :term:`Glossary\n` for more details." @@ -81952,7 +81404,7 @@ "qname": "sklearn.ensemble._forest.RandomTreesEmbedding.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Controls the generation of the random `y` used to fit the trees\nand the draw of the splits for each feature at the trees' nodes.\nSee :term:`Glossary ` for details." @@ -81981,7 +81433,7 @@ "qname": "sklearn.ensemble._forest.RandomTreesEmbedding.__init__.verbose", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=0", "description": "Controls the verbosity when fitting and predicting." @@ -81997,7 +81449,7 @@ "qname": "sklearn.ensemble._forest.RandomTreesEmbedding.__init__.warm_start", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "When set to ``True``, reuse the solution of the previous call to fit\nand add more estimators to the ensemble, otherwise, just fit a whole\nnew forest. See :term:`the Glossary `." @@ -82009,7 +81461,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -82076,7 +81528,7 @@ "qname": "sklearn.ensemble._forest.RandomTreesEmbedding.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -82089,7 +81541,7 @@ "qname": "sklearn.ensemble._forest.RandomTreesEmbedding.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The input samples. Use ``dtype=np.float32`` for maximum\nefficiency. Sparse matrices are also supported, use sparse\n``csc_matrix`` for maximum efficiency." @@ -82114,7 +81566,7 @@ "qname": "sklearn.ensemble._forest.RandomTreesEmbedding.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present for API consistency by convention." 
@@ -82130,7 +81582,7 @@ "qname": "sklearn.ensemble._forest.RandomTreesEmbedding.fit.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,), default=None", "description": "Sample weights. If None, then samples are equally weighted. Splits\nthat would create child nodes with net zero or negative weight are\nignored while searching for a split in each node. In the case of\nclassification, splits are also ignored if they would result in any\nsingle class carrying a negative weight in either child node." @@ -82142,7 +81594,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit estimator.", "docstring": "\n Fit estimator.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples. Use ``dtype=np.float32`` for maximum\n efficiency. Sparse matrices are also supported, use sparse\n ``csc_matrix`` for maximum efficiency.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights. If None, then samples are equally weighted. Splits\n that would create child nodes with net zero or negative weight are\n ignored while searching for a split in each node. In the case of\n classification, splits are also ignored if they would result in any\n single class carrying a negative weight in either child node.\n\n Returns\n -------\n self : object\n Returns the instance itself.\n " }, @@ -82158,7 +81610,7 @@ "qname": "sklearn.ensemble._forest.RandomTreesEmbedding.fit_transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -82171,7 +81623,7 @@ "qname": "sklearn.ensemble._forest.RandomTreesEmbedding.fit_transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Input data used to build forests. Use ``dtype=np.float32`` for\nmaximum efficiency." @@ -82196,7 +81648,7 @@ "qname": "sklearn.ensemble._forest.RandomTreesEmbedding.fit_transform.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present for API consistency by convention." @@ -82212,7 +81664,7 @@ "qname": "sklearn.ensemble._forest.RandomTreesEmbedding.fit_transform.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,), default=None", "description": "Sample weights. If None, then samples are equally weighted. Splits\nthat would create child nodes with net zero or negative weight are\nignored while searching for a split in each node. In the case of\nclassification, splits are also ignored if they would result in any\nsingle class carrying a negative weight in either child node." @@ -82224,7 +81676,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit estimator and transform dataset.", "docstring": "\n Fit estimator and transform dataset.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Input data used to build forests. 
Use ``dtype=np.float32`` for\n maximum efficiency.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights. If None, then samples are equally weighted. Splits\n that would create child nodes with net zero or negative weight are\n ignored while searching for a split in each node. In the case of\n classification, splits are also ignored if they would result in any\n single class carrying a negative weight in either child node.\n\n Returns\n -------\n X_transformed : sparse matrix of shape (n_samples, n_out)\n Transformed dataset.\n " }, @@ -82240,7 +81692,7 @@ "qname": "sklearn.ensemble._forest.RandomTreesEmbedding.get_feature_names_out.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -82253,7 +81705,7 @@ "qname": "sklearn.ensemble._forest.RandomTreesEmbedding.get_feature_names_out.input_features", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of str or None, default=None", "description": "Only used to validate feature names with the names seen in :meth:`fit`." @@ -82274,7 +81726,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Get output feature names for transformation.", "docstring": "Get output feature names for transformation.\n\n Parameters\n ----------\n input_features : array-like of str or None, default=None\n Only used to validate feature names with the names seen in :meth:`fit`.\n\n Returns\n -------\n feature_names_out : ndarray of str objects\n Transformed feature names, in the format of\n `randomtreesembedding_{tree}_{leaf}`, where `tree` is the tree used\n to generate the leaf and `leaf` is the index of a leaf node\n in that tree. Note that the node indexing scheme is used to\n index both nodes with children (split nodes) and leaf nodes.\n Only the latter can be present as output features.\n As a consequence, there are missing indices in the output\n feature names.\n " }, @@ -82290,7 +81742,7 @@ "qname": "sklearn.ensemble._forest.RandomTreesEmbedding.transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -82303,7 +81755,7 @@ "qname": "sklearn.ensemble._forest.RandomTreesEmbedding.transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Input data to be transformed. Use ``dtype=np.float32`` for maximum\nefficiency. Sparse matrices are also supported, use sparse\n``csr_matrix`` for maximum efficiency." @@ -82324,7 +81776,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Transform dataset.", "docstring": "\n Transform dataset.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Input data to be transformed. Use ``dtype=np.float32`` for maximum\n efficiency. 
Sparse matrices are also supported, use sparse\n ``csr_matrix`` for maximum efficiency.\n\n Returns\n -------\n X_transformed : sparse matrix of shape (n_samples, n_out)\n Transformed dataset.\n " }, @@ -83963,7 +83415,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingClassifier.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -83976,14 +83428,14 @@ "qname": "sklearn.ensemble._gb.GradientBoostingClassifier.__init__.loss", "default_value": "'log_loss'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'log_loss', 'deviance', 'exponential'}, default='log_loss'", "description": "The loss function to be optimized. 'log_loss' refers to binomial and\nmultinomial deviance, the same as used in logistic regression.\nIt is a good choice for classification with probabilistic outputs.\nFor loss 'exponential', gradient boosting recovers the AdaBoost algorithm.\n\n.. deprecated:: 1.1\n The loss 'deviance' was deprecated in v1.1 and will be removed in\n version 1.3. Use `loss='log_loss'` which is equivalent." }, "type": { "kind": "EnumType", - "values": ["exponential", "log_loss", "deviance"] + "values": ["deviance", "exponential", "log_loss"] } }, { @@ -83992,7 +83444,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingClassifier.__init__.learning_rate", "default_value": "0.1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.1", "description": "Learning rate shrinks the contribution of each tree by `learning_rate`.\nThere is a trade-off between learning_rate and n_estimators.\nValues must be in the range `(0.0, inf)`." @@ -84008,7 +83460,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingClassifier.__init__.n_estimators", "default_value": "100", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=100", "description": "The number of boosting stages to perform. Gradient boosting\nis fairly robust to over-fitting so a large number usually\nresults in better performance.\nValues must be in the range `[1, inf)`." @@ -84024,7 +83476,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingClassifier.__init__.subsample", "default_value": "1.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1.0", "description": "The fraction of samples to be used for fitting the individual base\nlearners. If smaller than 1.0 this results in Stochastic Gradient\nBoosting. `subsample` interacts with the parameter `n_estimators`.\nChoosing `subsample < 1.0` leads to a reduction of variance\nand an increase in bias.\nValues must be in the range `(0.0, 1.0]`." @@ -84053,14 +83505,14 @@ "qname": "sklearn.ensemble._gb.GradientBoostingClassifier.__init__.criterion", "default_value": "'friedman_mse'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'friedman_mse', 'squared_error', 'mse'}, default='friedman_mse'", "description": "The function to measure the quality of a split. Supported criteria are\n'friedman_mse' for the mean squared error with improvement score by\nFriedman, 'squared_error' for mean squared error. The default value of\n'friedman_mse' is generally the best as it can provide a better\napproximation in some cases.\n\n.. versionadded:: 0.18\n\n.. deprecated:: 1.0\n Criterion 'mse' was deprecated in v1.0 and will be removed in\n version 1.2. 
Use `criterion='squared_error'` which is equivalent." }, "type": { "kind": "EnumType", - "values": ["squared_error", "mse", "friedman_mse"] + "values": ["friedman_mse", "mse", "squared_error"] } }, { @@ -84069,7 +83521,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingClassifier.__init__.min_samples_split", "default_value": "2", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or float, default=2", "description": "The minimum number of samples required to split an internal node:\n\n- If int, values must be in the range `[2, inf)`.\n- If float, values must be in the range `(0.0, 1.0]` and `min_samples_split`\n will be `ceil(min_samples_split * n_samples)`.\n\n.. versionchanged:: 0.18\n Added float values for fractions." @@ -84102,7 +83554,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingClassifier.__init__.min_samples_leaf", "default_value": "1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or float, default=1", "description": "The minimum number of samples required to be at a leaf node.\nA split point at any depth will only be considered if it leaves at\nleast ``min_samples_leaf`` training samples in each of the left and\nright branches. This may have the effect of smoothing the model,\nespecially in regression.\n\n- If int, values must be in the range `[1, inf)`.\n- If float, values must be in the range `(0.0, 1.0]` and `min_samples_leaf`\n will be `ceil(min_samples_leaf * n_samples)`.\n\n.. versionchanged:: 0.18\n Added float values for fractions." @@ -84135,7 +83587,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingClassifier.__init__.min_weight_fraction_leaf", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.0", "description": "The minimum weighted fraction of the sum total of weights (of all\nthe input samples) required to be at a leaf node. Samples have\nequal weight when sample_weight is not provided.\nValues must be in the range `[0.0, 0.5]`." @@ -84164,7 +83616,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingClassifier.__init__.max_depth", "default_value": "3", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=3", "description": "The maximum depth of the individual regression estimators. The maximum\ndepth limits the number of nodes in the tree. Tune this parameter\nfor best performance; the best value depends on the interaction\nof the input variables.\nValues must be in the range `[1, inf)`." @@ -84180,7 +83632,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingClassifier.__init__.min_impurity_decrease", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.0", "description": "A node will be split if this split induces a decrease of the impurity\ngreater than or equal to this value.\nValues must be in the range `[0.0, inf)`.\n\nThe weighted impurity decrease equation is the following::\n\n N_t / N * (impurity - N_t_R / N_t * right_impurity\n - N_t_L / N_t * left_impurity)\n\nwhere ``N`` is the total number of samples, ``N_t`` is the number of\nsamples at the current node, ``N_t_L`` is the number of samples in the\nleft child, and ``N_t_R`` is the number of samples in the right child.\n\n``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,\nif ``sample_weight`` is passed.\n\n.. 
versionadded:: 0.19" @@ -84196,7 +83648,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingClassifier.__init__.init", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "estimator or 'zero', default=None", "description": "An estimator object that is used to compute the initial predictions.\n``init`` has to provide :meth:`fit` and :meth:`predict_proba`. If\n'zero', the initial raw predictions are set to zero. By default, a\n``DummyEstimator`` predicting the classes priors is used." @@ -84221,7 +83673,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingClassifier.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Controls the random seed given to each Tree estimator at each\nboosting iteration.\nIn addition, it controls the random permutation of the features at\neach split (see Notes for more details).\nIt also controls the random splitting of the training data to obtain a\nvalidation set if `n_iter_no_change` is not None.\nPass an int for reproducible output across multiple function calls.\nSee :term:`Glossary `." @@ -84250,7 +83702,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingClassifier.__init__.max_features", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'auto', 'sqrt', 'log2'}, int or float, default=None", "description": "The number of features to consider when looking for the best split:\n\n- If int, values must be in the range `[1, inf)`.\n- If float, values must be in the range `(0.0, 1.0]` and the features\n considered at each split will be `int(max_features * n_features)`.\n- If 'auto', then `max_features=sqrt(n_features)`.\n- If 'sqrt', then `max_features=sqrt(n_features)`.\n- If 'log2', then `max_features=log2(n_features)`.\n- If None, then `max_features=n_features`.\n\nChoosing `max_features < n_features` leads to a reduction of variance\nand an increase in bias.\n\nNote: the search for a split does not stop until at least one\nvalid partition of the node samples is found, even if it requires to\neffectively inspect more than ``max_features`` features." @@ -84268,7 +83720,7 @@ }, { "kind": "EnumType", - "values": ["sqrt", "log2", "auto"] + "values": ["auto", "log2", "sqrt"] }, { "kind": "NamedType", @@ -84287,7 +83739,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingClassifier.__init__.verbose", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=0", "description": "Enable verbose output. If 1 then it prints progress and performance\nonce in a while (the more trees the lower the frequency). If greater\nthan 1 then it prints progress and performance for every tree.\nValues must be in the range `[0, inf)`." @@ -84303,7 +83755,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingClassifier.__init__.max_leaf_nodes", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Grow trees with ``max_leaf_nodes`` in best-first fashion.\nBest nodes are defined as relative reduction in impurity.\nValues must be in the range `[2, inf)`.\nIf `None`, then unlimited number of leaf nodes." 
@@ -84319,7 +83771,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingClassifier.__init__.warm_start", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "When set to ``True``, reuse the solution of the previous call to fit\nand add more estimators to the ensemble, otherwise, just erase the\nprevious solution. See :term:`the Glossary `." @@ -84335,7 +83787,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingClassifier.__init__.validation_fraction", "default_value": "0.1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.1", "description": "The proportion of training data to set aside as validation set for\nearly stopping. Values must be in the range `(0.0, 1.0)`.\nOnly used if ``n_iter_no_change`` is set to an integer.\n\n.. versionadded:: 0.20" @@ -84364,7 +83816,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingClassifier.__init__.n_iter_no_change", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "``n_iter_no_change`` is used to decide if early stopping will be used\nto terminate training when validation score is not improving. By\ndefault it is set to None to disable early stopping. If set to a\nnumber, it will set aside ``validation_fraction`` size of the training\ndata as validation and terminate training when validation score is not\nimproving in all of the previous ``n_iter_no_change`` numbers of\niterations. The split is stratified.\nValues must be in the range `[1, inf)`.\n\n.. versionadded:: 0.20" @@ -84380,7 +83832,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingClassifier.__init__.tol", "default_value": "0.0001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-4", "description": "Tolerance for the early stopping. When the loss is not improving\nby at least tol for ``n_iter_no_change`` iterations (if set to a\nnumber), the training stops.\nValues must be in the range `(0.0, inf)`.\n\n.. versionadded:: 0.20" @@ -84396,7 +83848,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingClassifier.__init__.ccp_alpha", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "non-negative float, default=0.0", "description": "Complexity parameter used for Minimal Cost-Complexity Pruning. The\nsubtree with the largest cost complexity that is smaller than\n``ccp_alpha`` will be chosen. By default, no pruning is performed.\nValues must be in the range `[0.0, inf)`.\nSee :ref:`minimal_cost_complexity_pruning` for details.\n\n.. versionadded:: 0.22" @@ -84408,7 +83860,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -84475,7 +83927,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingClassifier.decision_function.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -84488,7 +83940,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingClassifier.decision_function.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The input samples. 
Internally, it will be converted to\n``dtype=np.float32`` and if a sparse matrix is provided\nto a sparse ``csr_matrix``." @@ -84509,7 +83961,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Compute the decision function of ``X``.", "docstring": "Compute the decision function of ``X``.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples. Internally, it will be converted to\n ``dtype=np.float32`` and if a sparse matrix is provided\n to a sparse ``csr_matrix``.\n\n Returns\n -------\n score : ndarray of shape (n_samples, n_classes) or (n_samples,)\n The decision function of the input samples, which corresponds to\n the raw values predicted from the trees of the ensemble . The\n order of the classes corresponds to that in the attribute\n :term:`classes_`. Regression and binary classification produce an\n array of shape (n_samples,).\n " }, @@ -84525,7 +83977,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingClassifier.predict.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -84538,7 +83990,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingClassifier.predict.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The input samples. Internally, it will be converted to\n``dtype=np.float32`` and if a sparse matrix is provided\nto a sparse ``csr_matrix``." @@ -84559,7 +84011,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict class for X.", "docstring": "Predict class for X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples. Internally, it will be converted to\n ``dtype=np.float32`` and if a sparse matrix is provided\n to a sparse ``csr_matrix``.\n\n Returns\n -------\n y : ndarray of shape (n_samples,)\n The predicted values.\n " }, @@ -84575,7 +84027,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingClassifier.predict_log_proba.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -84588,7 +84040,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingClassifier.predict_log_proba.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The input samples. Internally, it will be converted to\n``dtype=np.float32`` and if a sparse matrix is provided\nto a sparse ``csr_matrix``." @@ -84609,7 +84061,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict class log-probabilities for X.", "docstring": "Predict class log-probabilities for X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples. Internally, it will be converted to\n ``dtype=np.float32`` and if a sparse matrix is provided\n to a sparse ``csr_matrix``.\n\n Returns\n -------\n p : ndarray of shape (n_samples, n_classes)\n The class log-probabilities of the input samples. 
The order of the\n classes corresponds to that in the attribute :term:`classes_`.\n\n Raises\n ------\n AttributeError\n If the ``loss`` does not support probabilities.\n " }, @@ -84625,7 +84077,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingClassifier.predict_proba.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -84638,7 +84090,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingClassifier.predict_proba.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The input samples. Internally, it will be converted to\n``dtype=np.float32`` and if a sparse matrix is provided\nto a sparse ``csr_matrix``." @@ -84659,7 +84111,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict class probabilities for X.", "docstring": "Predict class probabilities for X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples. Internally, it will be converted to\n ``dtype=np.float32`` and if a sparse matrix is provided\n to a sparse ``csr_matrix``.\n\n Returns\n -------\n p : ndarray of shape (n_samples, n_classes)\n The class probabilities of the input samples. The order of the\n classes corresponds to that in the attribute :term:`classes_`.\n\n Raises\n ------\n AttributeError\n If the ``loss`` does not support probabilities.\n " }, @@ -84675,7 +84127,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingClassifier.staged_decision_function.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -84688,7 +84140,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingClassifier.staged_decision_function.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The input samples. Internally, it will be converted to\n``dtype=np.float32`` and if a sparse matrix is provided\nto a sparse ``csr_matrix``." @@ -84709,7 +84161,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Compute decision function of ``X`` for each iteration.\n\nThis method allows monitoring (i.e. determine error on testing set)\nafter each stage.", "docstring": "Compute decision function of ``X`` for each iteration.\n\n This method allows monitoring (i.e. determine error on testing set)\n after each stage.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples. Internally, it will be converted to\n ``dtype=np.float32`` and if a sparse matrix is provided\n to a sparse ``csr_matrix``.\n\n Yields\n ------\n score : generator of ndarray of shape (n_samples, k)\n The decision function of the input samples, which corresponds to\n the raw values predicted from the trees of the ensemble . 
The\n classes corresponds to that in the attribute :term:`classes_`.\n Regression and binary classification are special cases with\n ``k == 1``, otherwise ``k==n_classes``.\n " }, @@ -84725,7 +84177,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingClassifier.staged_predict.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -84738,7 +84190,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingClassifier.staged_predict.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The input samples. Internally, it will be converted to\n``dtype=np.float32`` and if a sparse matrix is provided\nto a sparse ``csr_matrix``." @@ -84759,7 +84211,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict class at each stage for X.\n\nThis method allows monitoring (i.e. determine error on testing set)\nafter each stage.", "docstring": "Predict class at each stage for X.\n\n This method allows monitoring (i.e. determine error on testing set)\n after each stage.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples. Internally, it will be converted to\n ``dtype=np.float32`` and if a sparse matrix is provided\n to a sparse ``csr_matrix``.\n\n Yields\n ------\n y : generator of ndarray of shape (n_samples,)\n The predicted value of the input samples.\n " }, @@ -84775,7 +84227,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingClassifier.staged_predict_proba.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -84788,7 +84240,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingClassifier.staged_predict_proba.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The input samples. Internally, it will be converted to\n``dtype=np.float32`` and if a sparse matrix is provided\nto a sparse ``csr_matrix``." @@ -84809,7 +84261,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict class probabilities at each stage for X.\n\nThis method allows monitoring (i.e. determine error on testing set)\nafter each stage.", "docstring": "Predict class probabilities at each stage for X.\n\n This method allows monitoring (i.e. determine error on testing set)\n after each stage.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples. 
Internally, it will be converted to\n ``dtype=np.float32`` and if a sparse matrix is provided\n to a sparse ``csr_matrix``.\n\n Yields\n ------\n y : generator of ndarray of shape (n_samples,)\n The predicted value of the input samples.\n " }, @@ -84825,7 +84277,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingRegressor.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -84838,14 +84290,14 @@ "qname": "sklearn.ensemble._gb.GradientBoostingRegressor.__init__.loss", "default_value": "'squared_error'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'squared_error', 'absolute_error', 'huber', 'quantile'}, default='squared_error'", "description": "Loss function to be optimized. 'squared_error' refers to the squared\nerror for regression. 'absolute_error' refers to the absolute error of\nregression and is a robust loss function. 'huber' is a\ncombination of the two. 'quantile' allows quantile regression (use\n`alpha` to specify the quantile).\n\n.. deprecated:: 1.0\n The loss 'ls' was deprecated in v1.0 and will be removed in\n version 1.2. Use `loss='squared_error'` which is equivalent.\n\n.. deprecated:: 1.0\n The loss 'lad' was deprecated in v1.0 and will be removed in\n version 1.2. Use `loss='absolute_error'` which is equivalent." }, "type": { "kind": "EnumType", - "values": ["quantile", "squared_error", "absolute_error", "huber"] + "values": ["quantile", "huber", "squared_error", "absolute_error"] } }, { @@ -84854,7 +84306,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingRegressor.__init__.learning_rate", "default_value": "0.1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.1", "description": "Learning rate shrinks the contribution of each tree by `learning_rate`.\nThere is a trade-off between learning_rate and n_estimators.\nValues must be in the range `(0.0, inf)`." @@ -84870,7 +84322,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingRegressor.__init__.n_estimators", "default_value": "100", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=100", "description": "The number of boosting stages to perform. Gradient boosting\nis fairly robust to over-fitting so a large number usually\nresults in better performance.\nValues must be in the range `[1, inf)`." @@ -84886,7 +84338,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingRegressor.__init__.subsample", "default_value": "1.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1.0", "description": "The fraction of samples to be used for fitting the individual base\nlearners. If smaller than 1.0 this results in Stochastic Gradient\nBoosting. `subsample` interacts with the parameter `n_estimators`.\nChoosing `subsample < 1.0` leads to a reduction of variance\nand an increase in bias.\nValues must be in the range `(0.0, 1.0]`." @@ -84915,14 +84367,14 @@ "qname": "sklearn.ensemble._gb.GradientBoostingRegressor.__init__.criterion", "default_value": "'friedman_mse'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'friedman_mse', 'squared_error', 'mse'}, default='friedman_mse'", "description": "The function to measure the quality of a split. 
Supported criteria are\n\"friedman_mse\" for the mean squared error with improvement score by\nFriedman, \"squared_error\" for mean squared error. The default value of\n\"friedman_mse\" is generally the best as it can provide a better\napproximation in some cases.\n\n.. versionadded:: 0.18\n\n.. deprecated:: 1.0\n Criterion 'mse' was deprecated in v1.0 and will be removed in\n version 1.2. Use `criterion='squared_error'` which is equivalent." }, "type": { "kind": "EnumType", - "values": ["squared_error", "mse", "friedman_mse"] + "values": ["friedman_mse", "mse", "squared_error"] } }, { @@ -84931,7 +84383,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingRegressor.__init__.min_samples_split", "default_value": "2", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or float, default=2", "description": "The minimum number of samples required to split an internal node:\n\n- If int, values must be in the range `[2, inf)`.\n- If float, values must be in the range `(0.0, 1.0]` and `min_samples_split`\n will be `ceil(min_samples_split * n_samples)`.\n\n.. versionchanged:: 0.18\n Added float values for fractions." @@ -84964,7 +84416,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingRegressor.__init__.min_samples_leaf", "default_value": "1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or float, default=1", "description": "The minimum number of samples required to be at a leaf node.\nA split point at any depth will only be considered if it leaves at\nleast ``min_samples_leaf`` training samples in each of the left and\nright branches. This may have the effect of smoothing the model,\nespecially in regression.\n\n- If int, values must be in the range `[1, inf)`.\n- If float, values must be in the range `(0.0, 1.0]` and `min_samples_leaf`\n will be `ceil(min_samples_leaf * n_samples)`.\n\n.. versionchanged:: 0.18\n Added float values for fractions." @@ -84997,7 +84449,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingRegressor.__init__.min_weight_fraction_leaf", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.0", "description": "The minimum weighted fraction of the sum total of weights (of all\nthe input samples) required to be at a leaf node. Samples have\nequal weight when sample_weight is not provided.\nValues must be in the range `[0.0, 0.5]`." @@ -85026,7 +84478,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingRegressor.__init__.max_depth", "default_value": "3", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=3", "description": "Maximum depth of the individual regression estimators. The maximum\ndepth limits the number of nodes in the tree. Tune this parameter\nfor best performance; the best value depends on the interaction\nof the input variables.\nValues must be in the range `[1, inf)`." 
@@ -85042,7 +84494,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingRegressor.__init__.min_impurity_decrease", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.0", "description": "A node will be split if this split induces a decrease of the impurity\ngreater than or equal to this value.\nValues must be in the range `[0.0, inf)`.\n\nThe weighted impurity decrease equation is the following::\n\n N_t / N * (impurity - N_t_R / N_t * right_impurity\n - N_t_L / N_t * left_impurity)\n\nwhere ``N`` is the total number of samples, ``N_t`` is the number of\nsamples at the current node, ``N_t_L`` is the number of samples in the\nleft child, and ``N_t_R`` is the number of samples in the right child.\n\n``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,\nif ``sample_weight`` is passed.\n\n.. versionadded:: 0.19" @@ -85058,7 +84510,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingRegressor.__init__.init", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "estimator or 'zero', default=None", "description": "An estimator object that is used to compute the initial predictions.\n``init`` has to provide :term:`fit` and :term:`predict`. If 'zero', the\ninitial raw predictions are set to zero. By default a\n``DummyEstimator`` is used, predicting either the average target value\n(for loss='squared_error'), or a quantile for the other losses." @@ -85083,7 +84535,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingRegressor.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Controls the random seed given to each Tree estimator at each\nboosting iteration.\nIn addition, it controls the random permutation of the features at\neach split (see Notes for more details).\nIt also controls the random splitting of the training data to obtain a\nvalidation set if `n_iter_no_change` is not None.\nPass an int for reproducible output across multiple function calls.\nSee :term:`Glossary `." @@ -85112,7 +84564,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingRegressor.__init__.max_features", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'auto', 'sqrt', 'log2'}, int or float, default=None", "description": "The number of features to consider when looking for the best split:\n\n- If int, values must be in the range `[1, inf)`.\n- If float, values must be in the range `(0.0, 1.0]` and the features\n considered at each split will be `int(max_features * n_features)`.\n- If \"auto\", then `max_features=n_features`.\n- If \"sqrt\", then `max_features=sqrt(n_features)`.\n- If \"log2\", then `max_features=log2(n_features)`.\n- If None, then `max_features=n_features`.\n\nChoosing `max_features < n_features` leads to a reduction of variance\nand an increase in bias.\n\nNote: the search for a split does not stop until at least one\nvalid partition of the node samples is found, even if it requires to\neffectively inspect more than ``max_features`` features." 
@@ -85130,7 +84582,7 @@ }, { "kind": "EnumType", - "values": ["sqrt", "log2", "auto"] + "values": ["auto", "log2", "sqrt"] }, { "kind": "NamedType", @@ -85149,7 +84601,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingRegressor.__init__.alpha", "default_value": "0.9", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.9", "description": "The alpha-quantile of the huber loss function and the quantile\nloss function. Only if ``loss='huber'`` or ``loss='quantile'``.\nValues must be in the range `(0.0, 1.0)`." @@ -85178,7 +84630,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingRegressor.__init__.verbose", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=0", "description": "Enable verbose output. If 1 then it prints progress and performance\nonce in a while (the more trees the lower the frequency). If greater\nthan 1 then it prints progress and performance for every tree.\nValues must be in the range `[0, inf)`." @@ -85194,7 +84646,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingRegressor.__init__.max_leaf_nodes", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Grow trees with ``max_leaf_nodes`` in best-first fashion.\nBest nodes are defined as relative reduction in impurity.\nValues must be in the range `[2, inf)`.\nIf None, then unlimited number of leaf nodes." @@ -85210,7 +84662,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingRegressor.__init__.warm_start", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "When set to ``True``, reuse the solution of the previous call to fit\nand add more estimators to the ensemble, otherwise, just erase the\nprevious solution. See :term:`the Glossary `." @@ -85226,7 +84678,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingRegressor.__init__.validation_fraction", "default_value": "0.1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.1", "description": "The proportion of training data to set aside as validation set for\nearly stopping. Values must be in the range `(0.0, 1.0)`.\nOnly used if ``n_iter_no_change`` is set to an integer.\n\n.. versionadded:: 0.20" @@ -85255,7 +84707,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingRegressor.__init__.n_iter_no_change", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "``n_iter_no_change`` is used to decide if early stopping will be used\nto terminate training when validation score is not improving. By\ndefault it is set to None to disable early stopping. If set to a\nnumber, it will set aside ``validation_fraction`` size of the training\ndata as validation and terminate training when validation score is not\nimproving in all of the previous ``n_iter_no_change`` numbers of\niterations.\nValues must be in the range `[1, inf)`.\n\n.. versionadded:: 0.20" @@ -85271,7 +84723,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingRegressor.__init__.tol", "default_value": "0.0001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-4", "description": "Tolerance for the early stopping. 
When the loss is not improving\nby at least tol for ``n_iter_no_change`` iterations (if set to a\nnumber), the training stops.\nValues must be in the range `(0.0, inf)`.\n\n.. versionadded:: 0.20" @@ -85287,7 +84739,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingRegressor.__init__.ccp_alpha", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "non-negative float, default=0.0", "description": "Complexity parameter used for Minimal Cost-Complexity Pruning. The\nsubtree with the largest cost complexity that is smaller than\n``ccp_alpha`` will be chosen. By default, no pruning is performed.\nValues must be in the range `[0.0, inf)`.\nSee :ref:`minimal_cost_complexity_pruning` for details.\n\n.. versionadded:: 0.22" @@ -85299,7 +84751,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -85366,7 +84818,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingRegressor.apply.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -85379,7 +84831,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingRegressor.apply.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The input samples. Internally, its dtype will be converted to\n``dtype=np.float32``. If a sparse matrix is provided, it will\nbe converted to a sparse ``csr_matrix``." @@ -85400,7 +84852,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Apply trees in the ensemble to X, return leaf indices.\n\n.. versionadded:: 0.17", "docstring": "Apply trees in the ensemble to X, return leaf indices.\n\n .. versionadded:: 0.17\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples. Internally, its dtype will be converted to\n ``dtype=np.float32``. If a sparse matrix is provided, it will\n be converted to a sparse ``csr_matrix``.\n\n Returns\n -------\n X_leaves : array-like of shape (n_samples, n_estimators)\n For each datapoint x in X and for each tree in the ensemble,\n return the index of the leaf x ends up in each estimator.\n " }, @@ -85416,7 +84868,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingRegressor.predict.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -85429,7 +84881,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingRegressor.predict.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The input samples. Internally, it will be converted to\n``dtype=np.float32`` and if a sparse matrix is provided\nto a sparse ``csr_matrix``." @@ -85450,7 +84902,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict regression target for X.", "docstring": "Predict regression target for X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples. 
Internally, it will be converted to\n ``dtype=np.float32`` and if a sparse matrix is provided\n to a sparse ``csr_matrix``.\n\n Returns\n -------\n y : ndarray of shape (n_samples,)\n The predicted values.\n " }, @@ -85466,7 +84918,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingRegressor.staged_predict.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -85479,7 +84931,7 @@ "qname": "sklearn.ensemble._gb.GradientBoostingRegressor.staged_predict.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The input samples. Internally, it will be converted to\n``dtype=np.float32`` and if a sparse matrix is provided\nto a sparse ``csr_matrix``." @@ -85500,7 +84952,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict regression target at each stage for X.\n\nThis method allows monitoring (i.e. determine error on testing set)\nafter each stage.", "docstring": "Predict regression target at each stage for X.\n\n This method allows monitoring (i.e. determine error on testing set)\n after each stage.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples. Internally, it will be converted to\n ``dtype=np.float32`` and if a sparse matrix is provided\n to a sparse ``csr_matrix``.\n\n Yields\n ------\n y : generator of ndarray of shape (n_samples,)\n The predicted value of the input samples.\n " }, @@ -90800,7 +90252,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingClassifier.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -90813,14 +90265,14 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingClassifier.__init__.loss", "default_value": "'log_loss'", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{'log_loss', 'auto', 'binary_crossentropy', 'categorical_crossentropy'}, default='log_loss'", "description": "The loss function to use in the boosting process.\n\nFor binary classification problems, 'log_loss' is also known as logistic loss,\nbinomial deviance or binary crossentropy. Internally, the model fits one tree\nper boosting iteration and uses the logistic sigmoid function (expit) as\ninverse link function to compute the predicted positive class probability.\n\nFor multiclass classification problems, 'log_loss' is also known as multinomial\ndeviance or categorical crossentropy. Internally, the model fits one tree per\nboosting iteration and per class and uses the softmax function as inverse link\nfunction to compute the predicted probabilities of the classes.\n\n.. deprecated:: 1.1\n The loss arguments 'auto', 'binary_crossentropy' and\n 'categorical_crossentropy' were deprecated in v1.1 and will be removed in\n version 1.3. Use `loss='log_loss'` which is equivalent." 
}, "type": { "kind": "EnumType", - "values": ["auto", "categorical_crossentropy", "log_loss", "binary_crossentropy"] + "values": ["categorical_crossentropy", "auto", "log_loss", "binary_crossentropy"] } }, { @@ -90829,7 +90281,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingClassifier.__init__.learning_rate", "default_value": "0.1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.1", "description": "The learning rate, also known as *shrinkage*. This is used as a\nmultiplicative factor for the leaves values. Use ``1`` for no\nshrinkage." @@ -90845,7 +90297,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingClassifier.__init__.max_iter", "default_value": "100", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=100", "description": "The maximum number of iterations of the boosting process, i.e. the\nmaximum number of trees for binary classification. For multiclass\nclassification, `n_classes` trees per iteration are built." @@ -90861,7 +90313,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingClassifier.__init__.max_leaf_nodes", "default_value": "31", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or None, default=31", "description": "The maximum number of leaves for each tree. Must be strictly greater\nthan 1. If None, there is no maximum limit." @@ -90886,7 +90338,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingClassifier.__init__.max_depth", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or None, default=None", "description": "The maximum depth of each tree. The depth of a tree is the number of\nedges to go from the root to the deepest leaf.\nDepth isn't constrained by default." @@ -90911,7 +90363,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingClassifier.__init__.min_samples_leaf", "default_value": "20", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=20", "description": "The minimum number of samples per leaf. For small datasets with less\nthan a few hundred samples, it is recommended to lower this value\nsince only very shallow trees would be built." @@ -90927,7 +90379,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingClassifier.__init__.l2_regularization", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0", "description": "The L2 regularization parameter. Use 0 for no regularization." @@ -90943,7 +90395,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingClassifier.__init__.max_bins", "default_value": "255", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=255", "description": "The maximum number of bins to use for non-missing values. Before\ntraining, each feature of the input array `X` is binned into\ninteger-valued bins, which allows for a much faster training stage.\nFeatures with a small number of unique values may use less than\n``max_bins`` bins. 
In addition to the ``max_bins`` bins, one more bin\nis always reserved for missing values. Must be no larger than 255." @@ -90959,7 +90411,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingClassifier.__init__.categorical_features", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of {bool, int} of shape (n_features) or shape (n_categorical_features,), default=None", "description": "Indicates the categorical features.\n\n- None : no feature will be considered categorical.\n- boolean array-like : boolean mask indicating categorical features.\n- integer array-like : integer indices indicating categorical\n features.\n\nFor each categorical feature, there must be at most `max_bins` unique\ncategories, and each categorical value must be in [0, max_bins -1].\n\nRead more in the :ref:`User Guide `.\n\n.. versionadded:: 0.24" @@ -90988,7 +90440,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingClassifier.__init__.monotonic_cst", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of int of shape (n_features), default=None", "description": "Indicates the monotonic constraint to enforce on each feature. -1, 1\nand 0 respectively correspond to a negative constraint, positive\nconstraint and no constraint. Read more in the :ref:`User Guide\n`.\n\n.. versionadded:: 0.23" @@ -91004,7 +90456,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingClassifier.__init__.warm_start", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "When set to ``True``, reuse the solution of the previous call to fit\nand add more estimators to the ensemble. For results to be valid, the\nestimator should be re-trained on the same data only.\nSee :term:`the Glossary `." @@ -91020,7 +90472,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingClassifier.__init__.early_stopping", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "'auto' or bool, default='auto'", "description": "If 'auto', early stopping is enabled if the sample size is larger than\n10000. If True, early stopping is enabled, otherwise early stopping is\ndisabled.\n\n.. versionadded:: 0.23" @@ -91045,7 +90497,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingClassifier.__init__.scoring", "default_value": "'loss'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str or callable or None, default='loss'", "description": "Scoring parameter to use for early stopping. It can be a single\nstring (see :ref:`scoring_parameter`) or a callable (see\n:ref:`scoring`). If None, the estimator's default scorer\nis used. If ``scoring='loss'``, early stopping is checked\nw.r.t the loss value. Only used if early stopping is performed." 
@@ -91074,7 +90526,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingClassifier.__init__.validation_fraction", "default_value": "0.1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or float or None, default=0.1", "description": "Proportion (or absolute size) of training data to set aside as\nvalidation data for early stopping. If None, early stopping is done on\nthe training data. Only used if early stopping is performed." @@ -91103,7 +90555,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingClassifier.__init__.n_iter_no_change", "default_value": "10", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=10", "description": "Used to determine when to \"early stop\". The fitting process is\nstopped when none of the last ``n_iter_no_change`` scores are better\nthan the ``n_iter_no_change - 1`` -th-to-last one, up to some\ntolerance. Only used if early stopping is performed." @@ -91119,7 +90571,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingClassifier.__init__.tol", "default_value": "1e-07", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-7", "description": "The absolute tolerance to use when comparing scores. The higher the\ntolerance, the more likely we are to early stop: higher tolerance\nmeans that it will be harder for subsequent iterations to be\nconsidered an improvement upon the reference score." @@ -91135,7 +90587,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingClassifier.__init__.verbose", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=0", "description": "The verbosity level. If not zero, print some information about the\nfitting process." @@ -91151,7 +90603,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingClassifier.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Pseudo-random number generator to control the subsampling in the\nbinning process, and the train/validation data split if early stopping\nis enabled.\nPass an int for reproducible output across multiple function calls.\nSee :term:`Glossary `." @@ -91176,7 +90628,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -91268,7 +90720,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingClassifier.decision_function.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -91281,7 +90733,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingClassifier.decision_function.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like, shape (n_samples, n_features)", "description": "The input samples." 
@@ -91302,7 +90754,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Compute the decision function of ``X``.", "docstring": "Compute the decision function of ``X``.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n The input samples.\n\n Returns\n -------\n decision : ndarray, shape (n_samples,) or (n_samples, n_trees_per_iteration)\n The raw predicted values (i.e. the sum of the trees leaves) for\n each sample. n_trees_per_iteration is equal to the number of\n classes in multiclass classification.\n " }, @@ -91318,7 +90770,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingClassifier.predict.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -91331,7 +90783,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingClassifier.predict.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like, shape (n_samples, n_features)", "description": "The input samples." @@ -91352,7 +90804,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict classes for X.", "docstring": "Predict classes for X.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n The input samples.\n\n Returns\n -------\n y : ndarray, shape (n_samples,)\n The predicted classes.\n " }, @@ -91368,7 +90820,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingClassifier.predict_proba.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -91381,7 +90833,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingClassifier.predict_proba.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like, shape (n_samples, n_features)", "description": "The input samples." @@ -91402,7 +90854,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict class probabilities for X.", "docstring": "Predict class probabilities for X.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n The input samples.\n\n Returns\n -------\n p : ndarray, shape (n_samples, n_classes)\n The class probabilities of the input samples.\n " }, @@ -91418,7 +90870,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingClassifier.staged_decision_function.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -91431,7 +90883,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingClassifier.staged_decision_function.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "The input samples." @@ -91443,7 +90895,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Compute decision function of ``X`` for each iteration.\n\nThis method allows monitoring (i.e. 
determine error on testing set)\nafter each stage.", "docstring": "Compute decision function of ``X`` for each iteration.\n\n This method allows monitoring (i.e. determine error on testing set)\n after each stage.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The input samples.\n\n Yields\n ------\n decision : generator of ndarray of shape (n_samples,) or (n_samples, n_trees_per_iteration)\n The decision function of the input samples, which corresponds to\n the raw values predicted from the trees of the ensemble . The\n classes corresponds to that in the attribute :term:`classes_`.\n " }, @@ -91459,7 +90911,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingClassifier.staged_predict.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -91472,7 +90924,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingClassifier.staged_predict.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "The input samples." @@ -91484,7 +90936,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict classes at each iteration.\n\nThis method allows monitoring (i.e. determine error on testing set)\nafter each stage.\n\n.. versionadded:: 0.24", "docstring": "Predict classes at each iteration.\n\n This method allows monitoring (i.e. determine error on testing set)\n after each stage.\n\n .. versionadded:: 0.24\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The input samples.\n\n Yields\n ------\n y : generator of ndarray of shape (n_samples,)\n The predicted classes of the input samples, for each iteration.\n " }, @@ -91500,7 +90952,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingClassifier.staged_predict_proba.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -91513,7 +90965,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingClassifier.staged_predict_proba.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "The input samples." @@ -91525,7 +90977,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict class probabilities at each iteration.\n\nThis method allows monitoring (i.e. determine error on testing set)\nafter each stage.", "docstring": "Predict class probabilities at each iteration.\n\n This method allows monitoring (i.e. 
determine error on testing set)\n after each stage.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The input samples.\n\n Yields\n ------\n y : generator of ndarray of shape (n_samples,)\n The predicted class probabilities of the input samples,\n for each iteration.\n " }, @@ -91541,7 +90993,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingRegressor.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -91554,14 +91006,14 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingRegressor.__init__.loss", "default_value": "'squared_error'", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{'squared_error', 'absolute_error', 'poisson', 'quantile'}, default='squared_error'", "description": "The loss function to use in the boosting process. Note that the\n\"squared error\" and \"poisson\" losses actually implement\n\"half least squares loss\" and \"half poisson deviance\" to simplify the\ncomputation of the gradient. Furthermore, \"poisson\" loss internally\nuses a log-link and requires ``y >= 0``.\n\"quantile\" uses the pinball loss.\n\n.. versionchanged:: 0.23\n Added option 'poisson'.\n\n.. versionchanged:: 1.1\n Added option 'quantile'.\n\n.. deprecated:: 1.0\n The loss 'least_squares' was deprecated in v1.0 and will be removed\n in version 1.2. Use `loss='squared_error'` which is equivalent.\n\n.. deprecated:: 1.0\n The loss 'least_absolute_deviation' was deprecated in v1.0 and will\n be removed in version 1.2. Use `loss='absolute_error'` which is\n equivalent." }, "type": { "kind": "EnumType", - "values": ["poisson", "squared_error", "absolute_error", "quantile"] + "values": ["quantile", "squared_error", "poisson", "absolute_error"] } }, { @@ -91570,7 +91022,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingRegressor.__init__.quantile", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=None", "description": "If loss is \"quantile\", this parameter specifies which quantile to be estimated\nand must be between 0 and 1." @@ -91586,7 +91038,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingRegressor.__init__.learning_rate", "default_value": "0.1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.1", "description": "The learning rate, also known as *shrinkage*. This is used as a\nmultiplicative factor for the leaves values. Use ``1`` for no\nshrinkage." @@ -91602,7 +91054,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingRegressor.__init__.max_iter", "default_value": "100", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=100", "description": "The maximum number of iterations of the boosting process, i.e. the\nmaximum number of trees." @@ -91618,7 +91070,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingRegressor.__init__.max_leaf_nodes", "default_value": "31", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or None, default=31", "description": "The maximum number of leaves for each tree. 
Must be strictly greater\nthan 1. If None, there is no maximum limit." @@ -91643,7 +91095,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingRegressor.__init__.max_depth", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or None, default=None", "description": "The maximum depth of each tree. The depth of a tree is the number of\nedges to go from the root to the deepest leaf.\nDepth isn't constrained by default." @@ -91668,7 +91120,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingRegressor.__init__.min_samples_leaf", "default_value": "20", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=20", "description": "The minimum number of samples per leaf. For small datasets with less\nthan a few hundred samples, it is recommended to lower this value\nsince only very shallow trees would be built." @@ -91684,7 +91136,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingRegressor.__init__.l2_regularization", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0", "description": "The L2 regularization parameter. Use ``0`` for no regularization\n(default)." @@ -91700,7 +91152,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingRegressor.__init__.max_bins", "default_value": "255", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=255", "description": "The maximum number of bins to use for non-missing values. Before\ntraining, each feature of the input array `X` is binned into\ninteger-valued bins, which allows for a much faster training stage.\nFeatures with a small number of unique values may use less than\n``max_bins`` bins. In addition to the ``max_bins`` bins, one more bin\nis always reserved for missing values. Must be no larger than 255." @@ -91716,7 +91168,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingRegressor.__init__.categorical_features", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of {bool, int} of shape (n_features) or shape (n_categorical_features,), default=None", "description": "Indicates the categorical features.\n\n- None : no feature will be considered categorical.\n- boolean array-like : boolean mask indicating categorical features.\n- integer array-like : integer indices indicating categorical\n features.\n\nFor each categorical feature, there must be at most `max_bins` unique\ncategories, and each categorical value must be in [0, max_bins -1].\n\nRead more in the :ref:`User Guide `.\n\n.. versionadded:: 0.24" @@ -91745,7 +91197,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingRegressor.__init__.monotonic_cst", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of int of shape (n_features), default=None", "description": "Indicates the monotonic constraint to enforce on each feature. -1, 1\nand 0 respectively correspond to a negative constraint, positive\nconstraint and no constraint. Read more in the :ref:`User Guide\n`.\n\n.. 
versionadded:: 0.23" @@ -91761,7 +91213,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingRegressor.__init__.warm_start", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "When set to ``True``, reuse the solution of the previous call to fit\nand add more estimators to the ensemble. For results to be valid, the\nestimator should be re-trained on the same data only.\nSee :term:`the Glossary `." @@ -91777,7 +91229,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingRegressor.__init__.early_stopping", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "'auto' or bool, default='auto'", "description": "If 'auto', early stopping is enabled if the sample size is larger than\n10000. If True, early stopping is enabled, otherwise early stopping is\ndisabled.\n\n.. versionadded:: 0.23" @@ -91802,7 +91254,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingRegressor.__init__.scoring", "default_value": "'loss'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str or callable or None, default='loss'", "description": "Scoring parameter to use for early stopping. It can be a single\nstring (see :ref:`scoring_parameter`) or a callable (see\n:ref:`scoring`). If None, the estimator's default scorer is used. If\n``scoring='loss'``, early stopping is checked w.r.t the loss value.\nOnly used if early stopping is performed." @@ -91831,7 +91283,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingRegressor.__init__.validation_fraction", "default_value": "0.1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or float or None, default=0.1", "description": "Proportion (or absolute size) of training data to set aside as\nvalidation data for early stopping. If None, early stopping is done on\nthe training data. Only used if early stopping is performed." @@ -91860,7 +91312,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingRegressor.__init__.n_iter_no_change", "default_value": "10", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=10", "description": "Used to determine when to \"early stop\". The fitting process is\nstopped when none of the last ``n_iter_no_change`` scores are better\nthan the ``n_iter_no_change - 1`` -th-to-last one, up to some\ntolerance. Only used if early stopping is performed." @@ -91876,7 +91328,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingRegressor.__init__.tol", "default_value": "1e-07", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-7", "description": "The absolute tolerance to use when comparing scores during early\nstopping. The higher the tolerance, the more likely we are to early\nstop: higher tolerance means that it will be harder for subsequent\niterations to be considered an improvement upon the reference score." 
@@ -91892,7 +91344,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingRegressor.__init__.verbose", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=0", "description": "The verbosity level. If not zero, print some information about the\nfitting process." @@ -91908,7 +91360,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingRegressor.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Pseudo-random number generator to control the subsampling in the\nbinning process, and the train/validation data split if early stopping\nis enabled.\nPass an int for reproducible output across multiple function calls.\nSee :term:`Glossary `." @@ -91933,7 +91385,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -92025,7 +91477,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingRegressor.predict.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -92038,7 +91490,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingRegressor.predict.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like, shape (n_samples, n_features)", "description": "The input samples." @@ -92059,7 +91511,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict values for X.", "docstring": "Predict values for X.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n The input samples.\n\n Returns\n -------\n y : ndarray, shape (n_samples,)\n The predicted values.\n " }, @@ -92075,7 +91527,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingRegressor.staged_predict.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -92088,7 +91540,7 @@ "qname": "sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingRegressor.staged_predict.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "The input samples." @@ -92100,7 +91552,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict regression target for each iteration.\n\nThis method allows monitoring (i.e. determine error on testing set)\nafter each stage.\n\n.. versionadded:: 0.24", "docstring": "Predict regression target for each iteration.\n\n This method allows monitoring (i.e. determine error on testing set)\n after each stage.\n\n .. 
versionadded:: 0.24\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The input samples.\n\n Yields\n ------\n y : generator of ndarray of shape (n_samples,)\n The predicted values of the input samples, for each iteration.\n " }, @@ -93651,7 +93103,7 @@ "qname": "sklearn.ensemble._iforest.IsolationForest.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -93664,7 +93116,7 @@ "qname": "sklearn.ensemble._iforest.IsolationForest.__init__.n_estimators", "default_value": "100", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=100", "description": "The number of base estimators in the ensemble." @@ -93680,7 +93132,7 @@ "qname": "sklearn.ensemble._iforest.IsolationForest.__init__.max_samples", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "\"auto\", int or float, default=\"auto\"", "description": "The number of samples to draw from X to train each base estimator.\n - If int, then draw `max_samples` samples.\n - If float, then draw `max_samples * X.shape[0]` samples.\n - If \"auto\", then `max_samples=min(256, n_samples)`.\n\nIf max_samples is larger than the number of samples provided,\nall samples will be used for all trees (no sampling)." @@ -93709,7 +93161,7 @@ "qname": "sklearn.ensemble._iforest.IsolationForest.__init__.contamination", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "'auto' or float, default='auto'", "description": "The amount of contamination of the data set, i.e. the proportion\nof outliers in the data set. Used when fitting to define the threshold\non the scores of the samples.\n\n - If 'auto', the threshold is determined as in the\n original paper.\n - If float, the contamination should be in the range (0, 0.5].\n\n.. versionchanged:: 0.22\n The default value of ``contamination`` changed from 0.1\n to ``'auto'``." @@ -93742,10 +93194,10 @@ "qname": "sklearn.ensemble._iforest.IsolationForest.__init__.max_features", "default_value": "1.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or float, default=1.0", - "description": "The number of features to draw from X to train each base estimator.\n\n - If int, then draw `max_features` features.\n - If float, then draw `max_features * X.shape[1]` features.\n\nNote: using a float number less than 1.0 or integer less than number of\nfeatures will enable feature subsampling and leads to a longerr runtime." + "description": "The number of features to draw from X to train each base estimator.\n\n - If int, then draw `max_features` features.\n - If float, then draw `max_features * X.shape[1]` features." }, "type": { "kind": "UnionType", @@ -93767,7 +93219,7 @@ "qname": "sklearn.ensemble._iforest.IsolationForest.__init__.bootstrap", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "If True, individual trees are fit on random subsets of the training\ndata sampled with replacement. If False, sampling without replacement\nis performed." 
@@ -93783,7 +93235,7 @@ "qname": "sklearn.ensemble._iforest.IsolationForest.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "The number of jobs to run in parallel for both :meth:`fit` and\n:meth:`predict`. ``None`` means 1 unless in a\n:obj:`joblib.parallel_backend` context. ``-1`` means using all\nprocessors. See :term:`Glossary ` for more details." @@ -93799,7 +93251,7 @@ "qname": "sklearn.ensemble._iforest.IsolationForest.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Controls the pseudo-randomness of the selection of the feature\nand split values for each branching step and each tree in the forest.\n\nPass an int for reproducible results across multiple function calls.\nSee :term:`Glossary `." @@ -93828,7 +93280,7 @@ "qname": "sklearn.ensemble._iforest.IsolationForest.__init__.verbose", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=0", "description": "Controls the verbosity of the tree building process." @@ -93844,7 +93296,7 @@ "qname": "sklearn.ensemble._iforest.IsolationForest.__init__.warm_start", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "When set to ``True``, reuse the solution of the previous call to fit\nand add more estimators to the ensemble, otherwise, just fit a whole\nnew forest. See :term:`the Glossary `.\n\n.. versionadded:: 0.21" @@ -93856,7 +93308,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -94077,7 +93529,7 @@ "qname": "sklearn.ensemble._iforest.IsolationForest.decision_function.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -94090,7 +93542,7 @@ "qname": "sklearn.ensemble._iforest.IsolationForest.decision_function.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The input samples. Internally, it will be converted to\n``dtype=np.float32`` and if a sparse matrix is provided\nto a sparse ``csr_matrix``." @@ -94111,7 +93563,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Average anomaly score of X of the base classifiers.\n\nThe anomaly score of an input sample is computed as\nthe mean anomaly score of the trees in the forest.\n\nThe measure of normality of an observation given a tree is the depth\nof the leaf containing this observation, which is equivalent to\nthe number of splittings required to isolate this point. In case of\nseveral observations n_left in the leaf, the average path length of\na n_left samples isolation tree is added.", "docstring": "\n Average anomaly score of X of the base classifiers.\n\n The anomaly score of an input sample is computed as\n the mean anomaly score of the trees in the forest.\n\n The measure of normality of an observation given a tree is the depth\n of the leaf containing this observation, which is equivalent to\n the number of splittings required to isolate this point. 
In case of\n several observations n_left in the leaf, the average path length of\n a n_left samples isolation tree is added.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples. Internally, it will be converted to\n ``dtype=np.float32`` and if a sparse matrix is provided\n to a sparse ``csr_matrix``.\n\n Returns\n -------\n scores : ndarray of shape (n_samples,)\n The anomaly score of the input samples.\n The lower, the more abnormal. Negative scores represent outliers,\n positive scores represent inliers.\n " }, @@ -94127,7 +93579,7 @@ "qname": "sklearn.ensemble._iforest.IsolationForest.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -94140,7 +93592,7 @@ "qname": "sklearn.ensemble._iforest.IsolationForest.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The input samples. Use ``dtype=np.float32`` for maximum\nefficiency. Sparse matrices are also supported, use sparse\n``csc_matrix`` for maximum efficiency." @@ -94165,7 +93617,7 @@ "qname": "sklearn.ensemble._iforest.IsolationForest.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present for API consistency by convention." @@ -94181,7 +93633,7 @@ "qname": "sklearn.ensemble._iforest.IsolationForest.fit.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,), default=None", "description": "Sample weights. If None, then samples are equally weighted." @@ -94193,7 +93645,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit estimator.", "docstring": "\n Fit estimator.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples. Use ``dtype=np.float32`` for maximum\n efficiency. Sparse matrices are also supported, use sparse\n ``csc_matrix`` for maximum efficiency.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights. If None, then samples are equally weighted.\n\n Returns\n -------\n self : object\n Fitted estimator.\n " }, @@ -94209,7 +93661,7 @@ "qname": "sklearn.ensemble._iforest.IsolationForest.predict.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -94222,7 +93674,7 @@ "qname": "sklearn.ensemble._iforest.IsolationForest.predict.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The input samples. Internally, it will be converted to\n``dtype=np.float32`` and if a sparse matrix is provided\nto a sparse ``csr_matrix``." 
@@ -94243,7 +93695,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict if a particular sample is an outlier or not.", "docstring": "\n Predict if a particular sample is an outlier or not.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples. Internally, it will be converted to\n ``dtype=np.float32`` and if a sparse matrix is provided\n to a sparse ``csr_matrix``.\n\n Returns\n -------\n is_inlier : ndarray of shape (n_samples,)\n For each observation, tells whether or not (+1 or -1) it should\n be considered as an inlier according to the fitted model.\n " }, @@ -94259,7 +93711,7 @@ "qname": "sklearn.ensemble._iforest.IsolationForest.score_samples.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -94272,7 +93724,7 @@ "qname": "sklearn.ensemble._iforest.IsolationForest.score_samples.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The input samples." @@ -94293,7 +93745,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Opposite of the anomaly score defined in the original paper.\n\nThe anomaly score of an input sample is computed as\nthe mean anomaly score of the trees in the forest.\n\nThe measure of normality of an observation given a tree is the depth\nof the leaf containing this observation, which is equivalent to\nthe number of splittings required to isolate this point. In case of\nseveral observations n_left in the leaf, the average path length of\na n_left samples isolation tree is added.", "docstring": "\n Opposite of the anomaly score defined in the original paper.\n\n The anomaly score of an input sample is computed as\n the mean anomaly score of the trees in the forest.\n\n The measure of normality of an observation given a tree is the depth\n of the leaf containing this observation, which is equivalent to\n the number of splittings required to isolate this point. In case of\n several observations n_left in the leaf, the average path length of\n a n_left samples isolation tree is added.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples.\n\n Returns\n -------\n scores : ndarray of shape (n_samples,)\n The anomaly score of the input samples.\n The lower, the more abnormal.\n " }, @@ -94334,7 +93786,7 @@ "qname": "sklearn.ensemble._stacking.StackingClassifier.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -94347,7 +93799,7 @@ "qname": "sklearn.ensemble._stacking.StackingClassifier.__init__.estimators", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "list of (str, estimator)", "description": "Base estimators which will be stacked together. Each element of the\nlist is defined as a tuple of string (i.e. name) and an estimator\ninstance. An estimator can be set to 'drop' using `set_params`." 
@@ -94363,7 +93815,7 @@ "qname": "sklearn.ensemble._stacking.StackingClassifier.__init__.final_estimator", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "estimator, default=None", "description": "A classifier which will be used to combine the base estimators.\nThe default classifier is a\n:class:`~sklearn.linear_model.LogisticRegression`." @@ -94379,7 +93831,7 @@ "qname": "sklearn.ensemble._stacking.StackingClassifier.__init__.cv", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, cross-validation generator, iterable, or \"prefit\", default=None", "description": "Determines the cross-validation splitting strategy used in\n`cross_val_predict` to train `final_estimator`. Possible inputs for\ncv are:\n\n* None, to use the default 5-fold cross validation,\n* integer, to specify the number of folds in a (Stratified) KFold,\n* An object to be used as a cross-validation generator,\n* An iterable yielding train, test splits,\n* `\"prefit\"` to assume the `estimators` are prefit. In this case, the\n estimators will not be refitted.\n\nFor integer/None inputs, if the estimator is a classifier and y is\neither binary or multiclass,\n:class:`~sklearn.model_selection.StratifiedKFold` is used.\nIn all other cases, :class:`~sklearn.model_selection.KFold` is used.\nThese splitters are instantiated with `shuffle=False` so the splits\nwill be the same across calls.\n\nRefer :ref:`User Guide ` for the various\ncross-validation strategies that can be used here.\n\nIf \"prefit\" is passed, it is assumed that all `estimators` have\nbeen fitted already. The `final_estimator_` is trained on the `estimators`\npredictions on the full training set and are **not** cross validated\npredictions. Please note that if the models have been trained on the same\ndata to train the stacking model, there is a very high risk of overfitting.\n\n.. versionadded:: 1.1\n The 'prefit' option was added in 1.1\n\n.. note::\n A larger number of split will provide no benefits if the number\n of training samples is large enough. Indeed, the training time\n will increase. ``cv`` is not used for model evaluation but for\n prediction." @@ -94412,14 +93864,14 @@ "qname": "sklearn.ensemble._stacking.StackingClassifier.__init__.stack_method", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'auto', 'predict_proba', 'decision_function', 'predict'}, default='auto'", "description": "Methods called for each base estimator. It can be:\n\n* if 'auto', it will try to invoke, for each estimator,\n `'predict_proba'`, `'decision_function'` or `'predict'` in that\n order.\n* otherwise, one of `'predict_proba'`, `'decision_function'` or\n `'predict'`. If the method is not implemented by the estimator, it\n will raise an error." }, "type": { "kind": "EnumType", - "values": ["decision_function", "auto", "predict", "predict_proba"] + "values": ["auto", "predict_proba", "predict", "decision_function"] } }, { @@ -94428,7 +93880,7 @@ "qname": "sklearn.ensemble._stacking.StackingClassifier.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "The number of jobs to run in parallel all `estimators` `fit`.\n`None` means 1 unless in a `joblib.parallel_backend` context. -1 means\nusing all processors. 
See Glossary for more details." @@ -94444,7 +93896,7 @@ "qname": "sklearn.ensemble._stacking.StackingClassifier.__init__.passthrough", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "When False, only the predictions of estimators will be used as\ntraining data for `final_estimator`. When True, the\n`final_estimator` is trained on the predictions as well as the\noriginal training data." @@ -94460,7 +93912,7 @@ "qname": "sklearn.ensemble._stacking.StackingClassifier.__init__.verbose", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=0", "description": "Verbosity level." @@ -94472,7 +93924,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -94538,7 +93990,7 @@ "qname": "sklearn.ensemble._stacking.StackingClassifier.decision_function.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -94551,7 +94003,7 @@ "qname": "sklearn.ensemble._stacking.StackingClassifier.decision_function.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Training vectors, where `n_samples` is the number of samples and\n`n_features` is the number of features." @@ -94572,7 +94024,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Decision function for samples in `X` using the final estimator.", "docstring": "Decision function for samples in `X` using the final estimator.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training vectors, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n Returns\n -------\n decisions : ndarray of shape (n_samples,), (n_samples, n_classes), or (n_samples, n_classes * (n_classes-1) / 2)\n The decision function computed the final estimator.\n " }, @@ -94588,7 +94040,7 @@ "qname": "sklearn.ensemble._stacking.StackingClassifier.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -94601,7 +94053,7 @@ "qname": "sklearn.ensemble._stacking.StackingClassifier.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Training vectors, where `n_samples` is the number of samples and\n`n_features` is the number of features." @@ -94626,7 +94078,7 @@ "qname": "sklearn.ensemble._stacking.StackingClassifier.fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "Target values." @@ -94642,7 +94094,7 @@ "qname": "sklearn.ensemble._stacking.StackingClassifier.fit.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,), default=None", "description": "Sample weights. If None, then samples are equally weighted.\nNote that this is supported only if all underlying estimators\nsupport sample weights." 
@@ -94654,7 +94106,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the estimators.", "docstring": "Fit the estimators.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training vectors, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n y : array-like of shape (n_samples,)\n Target values.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights. If None, then samples are equally weighted.\n Note that this is supported only if all underlying estimators\n support sample weights.\n\n Returns\n -------\n self : object\n Returns a fitted instance of estimator.\n " }, @@ -94670,7 +94122,7 @@ "qname": "sklearn.ensemble._stacking.StackingClassifier.predict.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -94683,7 +94135,7 @@ "qname": "sklearn.ensemble._stacking.StackingClassifier.predict.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Training vectors, where `n_samples` is the number of samples and\n`n_features` is the number of features." @@ -94704,7 +94156,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict target for X.", "docstring": "Predict target for X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training vectors, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n **predict_params : dict of str -> obj\n Parameters to the `predict` called by the `final_estimator`. Note\n that this may be used to return uncertainties from some estimators\n with `return_std` or `return_cov`. Be aware that it will only\n accounts for uncertainty in the final estimator.\n\n Returns\n -------\n y_pred : ndarray of shape (n_samples,) or (n_samples, n_output)\n Predicted targets.\n " }, @@ -94720,7 +94172,7 @@ "qname": "sklearn.ensemble._stacking.StackingClassifier.predict_proba.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -94733,7 +94185,7 @@ "qname": "sklearn.ensemble._stacking.StackingClassifier.predict_proba.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Training vectors, where `n_samples` is the number of samples and\n`n_features` is the number of features." 
@@ -94754,7 +94206,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict class probabilities for `X` using the final estimator.", "docstring": "Predict class probabilities for `X` using the final estimator.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training vectors, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n Returns\n -------\n probabilities : ndarray of shape (n_samples, n_classes) or list of ndarray of shape (n_output,)\n The class probabilities of the input samples.\n " }, @@ -94770,7 +94222,7 @@ "qname": "sklearn.ensemble._stacking.StackingClassifier.transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -94783,7 +94235,7 @@ "qname": "sklearn.ensemble._stacking.StackingClassifier.transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Training vectors, where `n_samples` is the number of samples and\n`n_features` is the number of features." @@ -94804,7 +94256,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Return class labels or probabilities for X for each estimator.", "docstring": "Return class labels or probabilities for X for each estimator.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training vectors, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n Returns\n -------\n y_preds : ndarray of shape (n_samples, n_estimators) or (n_samples, n_classes * n_estimators)\n Prediction outputs for each estimator.\n " }, @@ -94820,7 +94272,7 @@ "qname": "sklearn.ensemble._stacking.StackingRegressor.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -94833,7 +94285,7 @@ "qname": "sklearn.ensemble._stacking.StackingRegressor.__init__.estimators", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "list of (str, estimator)", "description": "Base estimators which will be stacked together. Each element of the\nlist is defined as a tuple of string (i.e. name) and an estimator\ninstance. An estimator can be set to 'drop' using `set_params`." @@ -94849,7 +94301,7 @@ "qname": "sklearn.ensemble._stacking.StackingRegressor.__init__.final_estimator", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "estimator, default=None", "description": "A regressor which will be used to combine the base estimators.\nThe default regressor is a :class:`~sklearn.linear_model.RidgeCV`." @@ -94865,7 +94317,7 @@ "qname": "sklearn.ensemble._stacking.StackingRegressor.__init__.cv", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, cross-validation generator, iterable, or \"prefit\", default=None", "description": "Determines the cross-validation splitting strategy used in\n`cross_val_predict` to train `final_estimator`. 
Possible inputs for\ncv are:\n\n* None, to use the default 5-fold cross validation,\n* integer, to specify the number of folds in a (Stratified) KFold,\n* An object to be used as a cross-validation generator,\n* An iterable yielding train, test splits.\n* \"prefit\" to assume the `estimators` are prefit, and skip cross validation\n\nFor integer/None inputs, if the estimator is a classifier and y is\neither binary or multiclass,\n:class:`~sklearn.model_selection.StratifiedKFold` is used.\nIn all other cases, :class:`~sklearn.model_selection.KFold` is used.\nThese splitters are instantiated with `shuffle=False` so the splits\nwill be the same across calls.\n\nRefer :ref:`User Guide ` for the various\ncross-validation strategies that can be used here.\n\nIf \"prefit\" is passed, it is assumed that all `estimators` have\nbeen fitted already. The `final_estimator_` is trained on the `estimators`\npredictions on the full training set and are **not** cross validated\npredictions. Please note that if the models have been trained on the same\ndata to train the stacking model, there is a very high risk of overfitting.\n\n.. versionadded:: 1.1\n The 'prefit' option was added in 1.1\n\n.. note::\n A larger number of split will provide no benefits if the number\n of training samples is large enough. Indeed, the training time\n will increase. ``cv`` is not used for model evaluation but for\n prediction." @@ -94898,7 +94350,7 @@ "qname": "sklearn.ensemble._stacking.StackingRegressor.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "The number of jobs to run in parallel for `fit` of all `estimators`.\n`None` means 1 unless in a `joblib.parallel_backend` context. -1 means\nusing all processors. See Glossary for more details." @@ -94914,7 +94366,7 @@ "qname": "sklearn.ensemble._stacking.StackingRegressor.__init__.passthrough", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "When False, only the predictions of estimators will be used as\ntraining data for `final_estimator`. When True, the\n`final_estimator` is trained on the predictions as well as the\noriginal training data." @@ -94930,7 +94382,7 @@ "qname": "sklearn.ensemble._stacking.StackingRegressor.__init__.verbose", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=0", "description": "Verbosity level." @@ -94942,7 +94394,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -95008,7 +94460,7 @@ "qname": "sklearn.ensemble._stacking.StackingRegressor.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -95021,7 +94473,7 @@ "qname": "sklearn.ensemble._stacking.StackingRegressor.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Training vectors, where `n_samples` is the number of samples and\n`n_features` is the number of features." 
@@ -95046,7 +94498,7 @@ "qname": "sklearn.ensemble._stacking.StackingRegressor.fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "Target values." @@ -95062,7 +94514,7 @@ "qname": "sklearn.ensemble._stacking.StackingRegressor.fit.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,), default=None", "description": "Sample weights. If None, then samples are equally weighted.\nNote that this is supported only if all underlying estimators\nsupport sample weights." @@ -95074,7 +94526,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the estimators.", "docstring": "Fit the estimators.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training vectors, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n y : array-like of shape (n_samples,)\n Target values.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights. If None, then samples are equally weighted.\n Note that this is supported only if all underlying estimators\n support sample weights.\n\n Returns\n -------\n self : object\n Returns a fitted instance.\n " }, @@ -95090,7 +94542,7 @@ "qname": "sklearn.ensemble._stacking.StackingRegressor.transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -95103,7 +94555,7 @@ "qname": "sklearn.ensemble._stacking.StackingRegressor.transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Training vectors, where `n_samples` is the number of samples and\n`n_features` is the number of features." @@ -95124,7 +94576,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Return the predictions for X for each estimator.", "docstring": "Return the predictions for X for each estimator.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training vectors, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n Returns\n -------\n y_preds : ndarray of shape (n_samples, n_estimators)\n Prediction outputs for each estimator.\n " }, @@ -95704,7 +95156,7 @@ "qname": "sklearn.ensemble._voting.VotingClassifier.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -95717,7 +95169,7 @@ "qname": "sklearn.ensemble._voting.VotingClassifier.__init__.estimators", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "list of (str, estimator) tuples", "description": "Invoking the ``fit`` method on the ``VotingClassifier`` will fit clones\nof those original estimators that will be stored in the class attribute\n``self.estimators_``. An estimator can be set to ``'drop'`` using\n:meth:`set_params`.\n\n.. versionchanged:: 0.21\n ``'drop'`` is accepted. Using None was deprecated in 0.22 and\n support was removed in 0.24." 
@@ -95733,7 +95185,7 @@ "qname": "sklearn.ensemble._voting.VotingClassifier.__init__.voting", "default_value": "'hard'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'hard', 'soft'}, default='hard'", "description": "If 'hard', uses predicted class labels for majority rule voting.\nElse if 'soft', predicts the class label based on the argmax of\nthe sums of the predicted probabilities, which is recommended for\nan ensemble of well-calibrated classifiers." @@ -95749,7 +95201,7 @@ "qname": "sklearn.ensemble._voting.VotingClassifier.__init__.weights", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_classifiers,), default=None", "description": "Sequence of weights (`float` or `int`) to weight the occurrences of\npredicted class labels (`hard` voting) or class probabilities\nbefore averaging (`soft` voting). Uses uniform weights if `None`." @@ -95765,7 +95217,7 @@ "qname": "sklearn.ensemble._voting.VotingClassifier.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "The number of jobs to run in parallel for ``fit``.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details.\n\n.. versionadded:: 0.18" @@ -95781,7 +95233,7 @@ "qname": "sklearn.ensemble._voting.VotingClassifier.__init__.flatten_transform", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Affects shape of transform output only when voting='soft'\nIf voting='soft' and flatten_transform=True, transform method returns\nmatrix with shape (n_samples, n_classifiers * n_classes). If\nflatten_transform=False, it returns\n(n_classifiers, n_samples, n_classes)." @@ -95797,7 +95249,7 @@ "qname": "sklearn.ensemble._voting.VotingClassifier.__init__.verbose", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "If True, the time elapsed while fitting will be printed as it\nis completed.\n\n.. versionadded:: 0.23" @@ -95809,7 +95261,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -95888,7 +95340,7 @@ "qname": "sklearn.ensemble._voting.VotingClassifier.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -95901,7 +95353,7 @@ "qname": "sklearn.ensemble._voting.VotingClassifier.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Training vectors, where `n_samples` is the number of samples and\n`n_features` is the number of features." @@ -95926,7 +95378,7 @@ "qname": "sklearn.ensemble._voting.VotingClassifier.fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "Target values." 
@@ -95942,7 +95394,7 @@ "qname": "sklearn.ensemble._voting.VotingClassifier.fit.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,), default=None", "description": "Sample weights. If None, then samples are equally weighted.\nNote that this is supported only if all underlying estimators\nsupport sample weights.\n\n.. versionadded:: 0.18" @@ -95954,7 +95406,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the estimators.", "docstring": "Fit the estimators.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training vectors, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n y : array-like of shape (n_samples,)\n Target values.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights. If None, then samples are equally weighted.\n Note that this is supported only if all underlying estimators\n support sample weights.\n\n .. versionadded:: 0.18\n\n Returns\n -------\n self : object\n Returns the instance itself.\n " }, @@ -95970,7 +95422,7 @@ "qname": "sklearn.ensemble._voting.VotingClassifier.get_feature_names_out.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -95983,7 +95435,7 @@ "qname": "sklearn.ensemble._voting.VotingClassifier.get_feature_names_out.input_features", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of str or None, default=None", "description": "Not used, present here for API consistency by convention." @@ -96004,7 +95456,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Get output feature names for transformation.", "docstring": "Get output feature names for transformation.\n\n Parameters\n ----------\n input_features : array-like of str or None, default=None\n Not used, present here for API consistency by convention.\n\n Returns\n -------\n feature_names_out : ndarray of str objects\n Transformed feature names.\n " }, @@ -96020,7 +95472,7 @@ "qname": "sklearn.ensemble._voting.VotingClassifier.predict.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -96033,7 +95485,7 @@ "qname": "sklearn.ensemble._voting.VotingClassifier.predict.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The input samples." 
@@ -96054,7 +95506,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict class labels for X.", "docstring": "Predict class labels for X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples.\n\n Returns\n -------\n maj : array-like of shape (n_samples,)\n Predicted class labels.\n " }, @@ -96070,7 +95522,7 @@ "qname": "sklearn.ensemble._voting.VotingClassifier.predict_proba.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -96083,7 +95535,7 @@ "qname": "sklearn.ensemble._voting.VotingClassifier.predict_proba.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The input samples." @@ -96104,7 +95556,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Compute probabilities of possible outcomes for samples in X.", "docstring": "Compute probabilities of possible outcomes for samples in X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples.\n\n Returns\n -------\n avg : array-like of shape (n_samples, n_classes)\n Weighted average probability for each class per sample.\n " }, @@ -96120,7 +95572,7 @@ "qname": "sklearn.ensemble._voting.VotingClassifier.transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -96133,7 +95585,7 @@ "qname": "sklearn.ensemble._voting.VotingClassifier.transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Training vectors, where `n_samples` is the number of samples and\n`n_features` is the number of features." 
@@ -96154,7 +95606,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Return class labels or probabilities for X for each estimator.", "docstring": "Return class labels or probabilities for X for each estimator.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training vectors, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n Returns\n -------\n probabilities_or_labels\n If `voting='soft'` and `flatten_transform=True`:\n returns ndarray of shape (n_samples, n_classifiers * n_classes),\n being class probabilities calculated by each classifier.\n If `voting='soft' and `flatten_transform=False`:\n ndarray of shape (n_classifiers, n_samples, n_classes)\n If `voting='hard'`:\n ndarray of shape (n_samples, n_classifiers), being\n class labels predicted by each classifier.\n " }, @@ -96170,7 +95622,7 @@ "qname": "sklearn.ensemble._voting.VotingRegressor.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -96183,7 +95635,7 @@ "qname": "sklearn.ensemble._voting.VotingRegressor.__init__.estimators", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "list of (str, estimator) tuples", "description": "Invoking the ``fit`` method on the ``VotingRegressor`` will fit clones\nof those original estimators that will be stored in the class attribute\n``self.estimators_``. An estimator can be set to ``'drop'`` using\n:meth:`set_params`.\n\n.. versionchanged:: 0.21\n ``'drop'`` is accepted. Using None was deprecated in 0.22 and\n support was removed in 0.24." @@ -96199,7 +95651,7 @@ "qname": "sklearn.ensemble._voting.VotingRegressor.__init__.weights", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_regressors,), default=None", "description": "Sequence of weights (`float` or `int`) to weight the occurrences of\npredicted values before averaging. Uses uniform weights if `None`." @@ -96215,7 +95667,7 @@ "qname": "sklearn.ensemble._voting.VotingRegressor.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "The number of jobs to run in parallel for ``fit``.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details." @@ -96231,7 +95683,7 @@ "qname": "sklearn.ensemble._voting.VotingRegressor.__init__.verbose", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "If True, the time elapsed while fitting will be printed as it\nis completed.\n\n.. 
versionadded:: 0.23" @@ -96243,7 +95695,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -96259,7 +95711,7 @@ "qname": "sklearn.ensemble._voting.VotingRegressor.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -96272,7 +95724,7 @@ "qname": "sklearn.ensemble._voting.VotingRegressor.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Training vectors, where `n_samples` is the number of samples and\n`n_features` is the number of features." @@ -96297,7 +95749,7 @@ "qname": "sklearn.ensemble._voting.VotingRegressor.fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "Target values." @@ -96313,7 +95765,7 @@ "qname": "sklearn.ensemble._voting.VotingRegressor.fit.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,), default=None", "description": "Sample weights. If None, then samples are equally weighted.\nNote that this is supported only if all underlying estimators\nsupport sample weights." @@ -96325,7 +95777,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the estimators.", "docstring": "Fit the estimators.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training vectors, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n y : array-like of shape (n_samples,)\n Target values.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights. If None, then samples are equally weighted.\n Note that this is supported only if all underlying estimators\n support sample weights.\n\n Returns\n -------\n self : object\n Fitted estimator.\n " }, @@ -96341,7 +95793,7 @@ "qname": "sklearn.ensemble._voting.VotingRegressor.get_feature_names_out.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -96354,7 +95806,7 @@ "qname": "sklearn.ensemble._voting.VotingRegressor.get_feature_names_out.input_features", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of str or None, default=None", "description": "Not used, present here for API consistency by convention." 
@@ -96375,7 +95827,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Get output feature names for transformation.", "docstring": "Get output feature names for transformation.\n\n Parameters\n ----------\n input_features : array-like of str or None, default=None\n Not used, present here for API consistency by convention.\n\n Returns\n -------\n feature_names_out : ndarray of str objects\n Transformed feature names.\n " }, @@ -96391,7 +95843,7 @@ "qname": "sklearn.ensemble._voting.VotingRegressor.predict.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -96404,7 +95856,7 @@ "qname": "sklearn.ensemble._voting.VotingRegressor.predict.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The input samples." @@ -96425,7 +95877,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict regression target for X.\n\nThe predicted regression target of an input sample is computed as the\nmean predicted regression targets of the estimators in the ensemble.", "docstring": "Predict regression target for X.\n\n The predicted regression target of an input sample is computed as the\n mean predicted regression targets of the estimators in the ensemble.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples.\n\n Returns\n -------\n y : ndarray of shape (n_samples,)\n The predicted values.\n " }, @@ -96441,7 +95893,7 @@ "qname": "sklearn.ensemble._voting.VotingRegressor.transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -96454,7 +95906,7 @@ "qname": "sklearn.ensemble._voting.VotingRegressor.transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The input samples." @@ -96475,7 +95927,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Return predictions for X for each estimator.", "docstring": "Return predictions for X for each estimator.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples.\n\n Returns\n -------\n predictions : ndarray of shape (n_samples, n_classifiers)\n Values predicted by each regressor.\n " }, @@ -96823,7 +96275,7 @@ "qname": "sklearn.ensemble._weight_boosting.AdaBoostClassifier.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -96836,7 +96288,7 @@ "qname": "sklearn.ensemble._weight_boosting.AdaBoostClassifier.__init__.base_estimator", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "object, default=None", "description": "The base estimator from which the boosted ensemble is built.\nSupport for sample weighting is required, as well as proper\n``classes_`` and ``n_classes_`` attributes. If ``None``, then\nthe base estimator is :class:`~sklearn.tree.DecisionTreeClassifier`\ninitialized with `max_depth=1`." 
@@ -96852,7 +96304,7 @@ "qname": "sklearn.ensemble._weight_boosting.AdaBoostClassifier.__init__.n_estimators", "default_value": "50", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=50", "description": "The maximum number of estimators at which boosting is terminated.\nIn case of perfect fit, the learning procedure is stopped early.\nValues must be in the range `[1, inf)`." @@ -96868,7 +96320,7 @@ "qname": "sklearn.ensemble._weight_boosting.AdaBoostClassifier.__init__.learning_rate", "default_value": "1.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1.0", "description": "Weight applied to each classifier at each boosting iteration. A higher\nlearning rate increases the contribution of each classifier. There is\na trade-off between the `learning_rate` and `n_estimators` parameters.\nValues must be in the range `(0.0, inf)`." @@ -96884,7 +96336,7 @@ "qname": "sklearn.ensemble._weight_boosting.AdaBoostClassifier.__init__.algorithm", "default_value": "'SAMME.R'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'SAMME', 'SAMME.R'}, default='SAMME.R'", "description": "If 'SAMME.R' then use the SAMME.R real boosting algorithm.\n``base_estimator`` must support calculation of class probabilities.\nIf 'SAMME' then use the SAMME discrete boosting algorithm.\nThe SAMME.R algorithm typically converges faster than SAMME,\nachieving a lower test error with fewer boosting iterations." @@ -96900,7 +96352,7 @@ "qname": "sklearn.ensemble._weight_boosting.AdaBoostClassifier.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Controls the random seed given at each `base_estimator` at each\nboosting iteration.\nThus, it is only used when `base_estimator` exposes a `random_state`.\nPass an int for reproducible output across multiple function calls.\nSee :term:`Glossary `." @@ -96925,7 +96377,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -97298,7 +96750,7 @@ "qname": "sklearn.ensemble._weight_boosting.AdaBoostClassifier.decision_function.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -97311,7 +96763,7 @@ "qname": "sklearn.ensemble._weight_boosting.AdaBoostClassifier.decision_function.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The training input samples. Sparse matrix can be CSC, CSR, COO,\nDOK, or LIL. COO, DOK, and LIL are converted to CSR." @@ -97332,7 +96784,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Compute the decision function of ``X``.", "docstring": "Compute the decision function of ``X``.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The training input samples. Sparse matrix can be CSC, CSR, COO,\n DOK, or LIL. COO, DOK, and LIL are converted to CSR.\n\n Returns\n -------\n score : ndarray of shape of (n_samples, k)\n The decision function of the input samples. 
The order of\n outputs is the same of that of the :term:`classes_` attribute.\n Binary classification is a special cases with ``k == 1``,\n otherwise ``k==n_classes``. For binary classification,\n values closer to -1 or 1 mean more like the first or second\n class in ``classes_``, respectively.\n " }, @@ -97348,7 +96800,7 @@ "qname": "sklearn.ensemble._weight_boosting.AdaBoostClassifier.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -97361,7 +96813,7 @@ "qname": "sklearn.ensemble._weight_boosting.AdaBoostClassifier.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The training input samples. Sparse matrix can be CSC, CSR, COO,\nDOK, or LIL. COO, DOK, and LIL are converted to CSR." @@ -97386,7 +96838,7 @@ "qname": "sklearn.ensemble._weight_boosting.AdaBoostClassifier.fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "The target values (class labels)." @@ -97402,7 +96854,7 @@ "qname": "sklearn.ensemble._weight_boosting.AdaBoostClassifier.fit.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,), default=None", "description": "Sample weights. If None, the sample weights are initialized to\n``1 / n_samples``." @@ -97414,7 +96866,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Build a boosted classifier from the training set (X, y).", "docstring": "Build a boosted classifier from the training set (X, y).\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The training input samples. Sparse matrix can be CSC, CSR, COO,\n DOK, or LIL. COO, DOK, and LIL are converted to CSR.\n\n y : array-like of shape (n_samples,)\n The target values (class labels).\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights. If None, the sample weights are initialized to\n ``1 / n_samples``.\n\n Returns\n -------\n self : object\n Fitted estimator.\n " }, @@ -97430,7 +96882,7 @@ "qname": "sklearn.ensemble._weight_boosting.AdaBoostClassifier.predict.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -97443,7 +96895,7 @@ "qname": "sklearn.ensemble._weight_boosting.AdaBoostClassifier.predict.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The training input samples. Sparse matrix can be CSC, CSR, COO,\nDOK, or LIL. COO, DOK, and LIL are converted to CSR." 
@@ -97464,7 +96916,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict classes for X.\n\nThe predicted class of an input sample is computed as the weighted mean\nprediction of the classifiers in the ensemble.", "docstring": "Predict classes for X.\n\n The predicted class of an input sample is computed as the weighted mean\n prediction of the classifiers in the ensemble.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The training input samples. Sparse matrix can be CSC, CSR, COO,\n DOK, or LIL. COO, DOK, and LIL are converted to CSR.\n\n Returns\n -------\n y : ndarray of shape (n_samples,)\n The predicted classes.\n " }, @@ -97480,7 +96932,7 @@ "qname": "sklearn.ensemble._weight_boosting.AdaBoostClassifier.predict_log_proba.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -97493,7 +96945,7 @@ "qname": "sklearn.ensemble._weight_boosting.AdaBoostClassifier.predict_log_proba.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The training input samples. Sparse matrix can be CSC, CSR, COO,\nDOK, or LIL. COO, DOK, and LIL are converted to CSR." @@ -97514,7 +96966,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict class log-probabilities for X.\n\nThe predicted class log-probabilities of an input sample is computed as\nthe weighted mean predicted class log-probabilities of the classifiers\nin the ensemble.", "docstring": "Predict class log-probabilities for X.\n\n The predicted class log-probabilities of an input sample is computed as\n the weighted mean predicted class log-probabilities of the classifiers\n in the ensemble.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The training input samples. Sparse matrix can be CSC, CSR, COO,\n DOK, or LIL. COO, DOK, and LIL are converted to CSR.\n\n Returns\n -------\n p : ndarray of shape (n_samples, n_classes)\n The class probabilities of the input samples. The order of\n outputs is the same of that of the :term:`classes_` attribute.\n " }, @@ -97530,7 +96982,7 @@ "qname": "sklearn.ensemble._weight_boosting.AdaBoostClassifier.predict_proba.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -97543,7 +96995,7 @@ "qname": "sklearn.ensemble._weight_boosting.AdaBoostClassifier.predict_proba.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The training input samples. Sparse matrix can be CSC, CSR, COO,\nDOK, or LIL. COO, DOK, and LIL are converted to CSR." 
@@ -97564,7 +97016,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict class probabilities for X.\n\nThe predicted class probabilities of an input sample is computed as\nthe weighted mean predicted class probabilities of the classifiers\nin the ensemble.", "docstring": "Predict class probabilities for X.\n\n The predicted class probabilities of an input sample is computed as\n the weighted mean predicted class probabilities of the classifiers\n in the ensemble.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The training input samples. Sparse matrix can be CSC, CSR, COO,\n DOK, or LIL. COO, DOK, and LIL are converted to CSR.\n\n Returns\n -------\n p : ndarray of shape (n_samples, n_classes)\n The class probabilities of the input samples. The order of\n outputs is the same of that of the :term:`classes_` attribute.\n " }, @@ -97580,7 +97032,7 @@ "qname": "sklearn.ensemble._weight_boosting.AdaBoostClassifier.staged_decision_function.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -97593,7 +97045,7 @@ "qname": "sklearn.ensemble._weight_boosting.AdaBoostClassifier.staged_decision_function.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The training input samples. Sparse matrix can be CSC, CSR, COO,\nDOK, or LIL. COO, DOK, and LIL are converted to CSR." @@ -97614,7 +97066,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Compute decision function of ``X`` for each boosting iteration.\n\nThis method allows monitoring (i.e. determine error on testing set)\nafter each boosting iteration.", "docstring": "Compute decision function of ``X`` for each boosting iteration.\n\n This method allows monitoring (i.e. determine error on testing set)\n after each boosting iteration.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The training input samples. Sparse matrix can be CSC, CSR, COO,\n DOK, or LIL. COO, DOK, and LIL are converted to CSR.\n\n Yields\n ------\n score : generator of ndarray of shape (n_samples, k)\n The decision function of the input samples. The order of\n outputs is the same of that of the :term:`classes_` attribute.\n Binary classification is a special cases with ``k == 1``,\n otherwise ``k==n_classes``. For binary classification,\n values closer to -1 or 1 mean more like the first or second\n class in ``classes_``, respectively.\n " }, @@ -97630,7 +97082,7 @@ "qname": "sklearn.ensemble._weight_boosting.AdaBoostClassifier.staged_predict.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -97643,7 +97095,7 @@ "qname": "sklearn.ensemble._weight_boosting.AdaBoostClassifier.staged_predict.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "The input samples. Sparse matrix can be CSC, CSR, COO,\nDOK, or LIL. COO, DOK, and LIL are converted to CSR." 
@@ -97655,7 +97107,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Return staged predictions for X.\n\nThe predicted class of an input sample is computed as the weighted mean\nprediction of the classifiers in the ensemble.\n\nThis generator method yields the ensemble prediction after each\niteration of boosting and therefore allows monitoring, such as to\ndetermine the prediction on a test set after each boost.", "docstring": "Return staged predictions for X.\n\n The predicted class of an input sample is computed as the weighted mean\n prediction of the classifiers in the ensemble.\n\n This generator method yields the ensemble prediction after each\n iteration of boosting and therefore allows monitoring, such as to\n determine the prediction on a test set after each boost.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The input samples. Sparse matrix can be CSC, CSR, COO,\n DOK, or LIL. COO, DOK, and LIL are converted to CSR.\n\n Yields\n ------\n y : generator of ndarray of shape (n_samples,)\n The predicted classes.\n " }, @@ -97671,7 +97123,7 @@ "qname": "sklearn.ensemble._weight_boosting.AdaBoostClassifier.staged_predict_proba.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -97684,7 +97136,7 @@ "qname": "sklearn.ensemble._weight_boosting.AdaBoostClassifier.staged_predict_proba.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The training input samples. Sparse matrix can be CSC, CSR, COO,\nDOK, or LIL. COO, DOK, and LIL are converted to CSR." @@ -97705,7 +97157,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict class probabilities for X.\n\nThe predicted class probabilities of an input sample is computed as\nthe weighted mean predicted class probabilities of the classifiers\nin the ensemble.\n\nThis generator method yields the ensemble predicted class probabilities\nafter each iteration of boosting and therefore allows monitoring, such\nas to determine the predicted class probabilities on a test set after\neach boost.", "docstring": "Predict class probabilities for X.\n\n The predicted class probabilities of an input sample is computed as\n the weighted mean predicted class probabilities of the classifiers\n in the ensemble.\n\n This generator method yields the ensemble predicted class probabilities\n after each iteration of boosting and therefore allows monitoring, such\n as to determine the predicted class probabilities on a test set after\n each boost.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The training input samples. Sparse matrix can be CSC, CSR, COO,\n DOK, or LIL. COO, DOK, and LIL are converted to CSR.\n\n Yields\n ------\n p : generator of ndarray of shape (n_samples,)\n The class probabilities of the input samples. 
The order of\n outputs is the same of that of the :term:`classes_` attribute.\n " }, @@ -97721,7 +97173,7 @@ "qname": "sklearn.ensemble._weight_boosting.AdaBoostRegressor.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -97734,7 +97186,7 @@ "qname": "sklearn.ensemble._weight_boosting.AdaBoostRegressor.__init__.base_estimator", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "object, default=None", "description": "The base estimator from which the boosted ensemble is built.\nIf ``None``, then the base estimator is\n:class:`~sklearn.tree.DecisionTreeRegressor` initialized with\n`max_depth=3`." @@ -97750,7 +97202,7 @@ "qname": "sklearn.ensemble._weight_boosting.AdaBoostRegressor.__init__.n_estimators", "default_value": "50", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=50", "description": "The maximum number of estimators at which boosting is terminated.\nIn case of perfect fit, the learning procedure is stopped early.\nValues must be in the range `[1, inf)`." @@ -97766,7 +97218,7 @@ "qname": "sklearn.ensemble._weight_boosting.AdaBoostRegressor.__init__.learning_rate", "default_value": "1.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1.0", "description": "Weight applied to each regressor at each boosting iteration. A higher\nlearning rate increases the contribution of each regressor. There is\na trade-off between the `learning_rate` and `n_estimators` parameters.\nValues must be in the range `(0.0, inf)`." @@ -97782,7 +97234,7 @@ "qname": "sklearn.ensemble._weight_boosting.AdaBoostRegressor.__init__.loss", "default_value": "'linear'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'linear', 'square', 'exponential'}, default='linear'", "description": "The loss function to use when updating the weights after each\nboosting iteration." @@ -97798,7 +97250,7 @@ "qname": "sklearn.ensemble._weight_boosting.AdaBoostRegressor.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Controls the random seed given at each `base_estimator` at each\nboosting iteration.\nThus, it is only used when `base_estimator` exposes a `random_state`.\nIn addition, it controls the bootstrap of the weights used to train the\n`base_estimator` at each boosting iteration.\nPass an int for reproducible output across multiple function calls.\nSee :term:`Glossary `." @@ -97823,7 +97275,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -98029,7 +97481,7 @@ "qname": "sklearn.ensemble._weight_boosting.AdaBoostRegressor.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -98042,7 +97494,7 @@ "qname": "sklearn.ensemble._weight_boosting.AdaBoostRegressor.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The training input samples. Sparse matrix can be CSC, CSR, COO,\nDOK, or LIL. 
COO, DOK, and LIL are converted to CSR." @@ -98067,7 +97519,7 @@ "qname": "sklearn.ensemble._weight_boosting.AdaBoostRegressor.fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "The target values (real numbers)." @@ -98083,7 +97535,7 @@ "qname": "sklearn.ensemble._weight_boosting.AdaBoostRegressor.fit.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,), default=None", "description": "Sample weights. If None, the sample weights are initialized to\n1 / n_samples." @@ -98095,7 +97547,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Build a boosted regressor from the training set (X, y).", "docstring": "Build a boosted regressor from the training set (X, y).\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The training input samples. Sparse matrix can be CSC, CSR, COO,\n DOK, or LIL. COO, DOK, and LIL are converted to CSR.\n\n y : array-like of shape (n_samples,)\n The target values (real numbers).\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights. If None, the sample weights are initialized to\n 1 / n_samples.\n\n Returns\n -------\n self : object\n Fitted AdaBoostRegressor estimator.\n " }, @@ -98111,7 +97563,7 @@ "qname": "sklearn.ensemble._weight_boosting.AdaBoostRegressor.predict.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -98124,7 +97576,7 @@ "qname": "sklearn.ensemble._weight_boosting.AdaBoostRegressor.predict.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The training input samples. Sparse matrix can be CSC, CSR, COO,\nDOK, or LIL. COO, DOK, and LIL are converted to CSR." @@ -98145,7 +97597,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict regression value for X.\n\nThe predicted regression value of an input sample is computed\nas the weighted median prediction of the regressors in the ensemble.", "docstring": "Predict regression value for X.\n\n The predicted regression value of an input sample is computed\n as the weighted median prediction of the regressors in the ensemble.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The training input samples. Sparse matrix can be CSC, CSR, COO,\n DOK, or LIL. COO, DOK, and LIL are converted to CSR.\n\n Returns\n -------\n y : ndarray of shape (n_samples,)\n The predicted regression values.\n " }, @@ -98161,7 +97613,7 @@ "qname": "sklearn.ensemble._weight_boosting.AdaBoostRegressor.staged_predict.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -98174,7 +97626,7 @@ "qname": "sklearn.ensemble._weight_boosting.AdaBoostRegressor.staged_predict.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The training input samples." 
@@ -98195,7 +97647,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Return staged predictions for X.\n\nThe predicted regression value of an input sample is computed\nas the weighted median prediction of the regressors in the ensemble.\n\nThis generator method yields the ensemble prediction after each\niteration of boosting and therefore allows monitoring, such as to\ndetermine the prediction on a test set after each boost.", "docstring": "Return staged predictions for X.\n\n The predicted regression value of an input sample is computed\n as the weighted median prediction of the regressors in the ensemble.\n\n This generator method yields the ensemble prediction after each\n iteration of boosting and therefore allows monitoring, such as to\n determine the prediction on a test set after each boost.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The training input samples.\n\n Yields\n ------\n y : generator of ndarray of shape (n_samples,)\n The predicted regression values.\n " }, @@ -103044,7 +102496,7 @@ "qname": "sklearn.feature_extraction._dict_vectorizer.DictVectorizer.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -103057,7 +102509,7 @@ "qname": "sklearn.feature_extraction._dict_vectorizer.DictVectorizer.__init__.dtype", "default_value": "np.float64", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "dtype, default=np.float64", "description": "The type of feature values. Passed to Numpy array/scipy.sparse matrix\nconstructors as the dtype argument." @@ -103073,7 +102525,7 @@ "qname": "sklearn.feature_extraction._dict_vectorizer.DictVectorizer.__init__.separator", "default_value": "'='", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str, default=\"=\"", "description": "Separator string used when constructing new features for one-hot\ncoding." @@ -103089,7 +102541,7 @@ "qname": "sklearn.feature_extraction._dict_vectorizer.DictVectorizer.__init__.sparse", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether transform should produce scipy.sparse matrices." @@ -103105,7 +102557,7 @@ "qname": "sklearn.feature_extraction._dict_vectorizer.DictVectorizer.__init__.sort", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether ``feature_names_`` and ``vocabulary_`` should be\nsorted when fitting." @@ -103117,7 +102569,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -103338,7 +102790,7 @@ "qname": "sklearn.feature_extraction._dict_vectorizer.DictVectorizer.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -103351,7 +102803,7 @@ "qname": "sklearn.feature_extraction._dict_vectorizer.DictVectorizer.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Mapping or iterable over Mappings", "description": "Dict(s) or Mapping(s) from feature names (arbitrary Python\nobjects) to feature values (strings or convertible to dtype).\n\n.. 
versionchanged:: 0.24\n Accepts multiple string values for one categorical feature." @@ -103376,7 +102828,7 @@ "qname": "sklearn.feature_extraction._dict_vectorizer.DictVectorizer.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "(ignored)", "description": "Ignored parameter." @@ -103388,7 +102840,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Learn a list of feature name -> indices mappings.", "docstring": "Learn a list of feature name -> indices mappings.\n\n Parameters\n ----------\n X : Mapping or iterable over Mappings\n Dict(s) or Mapping(s) from feature names (arbitrary Python\n objects) to feature values (strings or convertible to dtype).\n\n .. versionchanged:: 0.24\n Accepts multiple string values for one categorical feature.\n\n y : (ignored)\n Ignored parameter.\n\n Returns\n -------\n self : object\n DictVectorizer class instance.\n " }, @@ -103404,7 +102856,7 @@ "qname": "sklearn.feature_extraction._dict_vectorizer.DictVectorizer.fit_transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -103417,7 +102869,7 @@ "qname": "sklearn.feature_extraction._dict_vectorizer.DictVectorizer.fit_transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Mapping or iterable over Mappings", "description": "Dict(s) or Mapping(s) from feature names (arbitrary Python\nobjects) to feature values (strings or convertible to dtype).\n\n.. versionchanged:: 0.24\n Accepts multiple string values for one categorical feature." @@ -103442,7 +102894,7 @@ "qname": "sklearn.feature_extraction._dict_vectorizer.DictVectorizer.fit_transform.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "(ignored)", "description": "Ignored parameter." @@ -103454,7 +102906,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Learn a list of feature name -> indices mappings and transform X.\n\nLike fit(X) followed by transform(X), but does not require\nmaterializing X in memory.", "docstring": "Learn a list of feature name -> indices mappings and transform X.\n\n Like fit(X) followed by transform(X), but does not require\n materializing X in memory.\n\n Parameters\n ----------\n X : Mapping or iterable over Mappings\n Dict(s) or Mapping(s) from feature names (arbitrary Python\n objects) to feature values (strings or convertible to dtype).\n\n .. 
versionchanged:: 0.24\n Accepts multiple string values for one categorical feature.\n\n y : (ignored)\n Ignored parameter.\n\n Returns\n -------\n Xa : {array, sparse matrix}\n Feature vectors; always 2-d.\n " }, @@ -103472,7 +102924,7 @@ "qname": "sklearn.feature_extraction._dict_vectorizer.DictVectorizer.get_feature_names.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -103481,7 +102933,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Return a list of feature names, ordered by their indices.\n\nIf one-of-K coding is applied to categorical features, this will\ninclude the constructed feature names but not the original ones.", "docstring": "Return a list of feature names, ordered by their indices.\n\n If one-of-K coding is applied to categorical features, this will\n include the constructed feature names but not the original ones.\n\n Returns\n -------\n feature_names_ : list of length (n_features,)\n List containing the feature names (e.g., \"f=ham\" and \"f=spam\").\n " }, @@ -103497,7 +102949,7 @@ "qname": "sklearn.feature_extraction._dict_vectorizer.DictVectorizer.get_feature_names_out.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -103510,7 +102962,7 @@ "qname": "sklearn.feature_extraction._dict_vectorizer.DictVectorizer.get_feature_names_out.input_features", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of str or None, default=None", "description": "Not used, present here for API consistency by convention." @@ -103531,7 +102983,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Get output feature names for transformation.", "docstring": "Get output feature names for transformation.\n\n Parameters\n ----------\n input_features : array-like of str or None, default=None\n Not used, present here for API consistency by convention.\n\n Returns\n -------\n feature_names_out : ndarray of str objects\n Transformed feature names.\n " }, @@ -103547,7 +102999,7 @@ "qname": "sklearn.feature_extraction._dict_vectorizer.DictVectorizer.inverse_transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -103560,7 +103012,7 @@ "qname": "sklearn.feature_extraction._dict_vectorizer.DictVectorizer.inverse_transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Sample matrix." @@ -103585,7 +103037,7 @@ "qname": "sklearn.feature_extraction._dict_vectorizer.DictVectorizer.inverse_transform.dict_type", "default_value": "dict", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "type, default=dict", "description": "Constructor for feature mappings. Must conform to the\ncollections.Mapping API." 
@@ -103597,7 +103049,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Transform array or sparse matrix X back to feature mappings.\n\nX must have been produced by this DictVectorizer's transform or\nfit_transform method; it may only have passed through transformers\nthat preserve the number of features and their order.\n\nIn the case of one-hot/one-of-K coding, the constructed feature\nnames and values are returned rather than the original ones.", "docstring": "Transform array or sparse matrix X back to feature mappings.\n\n X must have been produced by this DictVectorizer's transform or\n fit_transform method; it may only have passed through transformers\n that preserve the number of features and their order.\n\n In the case of one-hot/one-of-K coding, the constructed feature\n names and values are returned rather than the original ones.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Sample matrix.\n dict_type : type, default=dict\n Constructor for feature mappings. Must conform to the\n collections.Mapping API.\n\n Returns\n -------\n D : list of dict_type objects of shape (n_samples,)\n Feature mappings for the samples in X.\n " }, @@ -103613,7 +103065,7 @@ "qname": "sklearn.feature_extraction._dict_vectorizer.DictVectorizer.restrict.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -103626,7 +103078,7 @@ "qname": "sklearn.feature_extraction._dict_vectorizer.DictVectorizer.restrict.support", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like", "description": "Boolean mask or list of indices (as returned by the get_support\nmember of feature selectors)." @@ -103642,7 +103094,7 @@ "qname": "sklearn.feature_extraction._dict_vectorizer.DictVectorizer.restrict.indices", "default_value": "False", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Whether support is a list of indices." 
@@ -103654,7 +103106,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Restrict the features to those in support using feature selection.\n\nThis function modifies the estimator in-place.", "docstring": "Restrict the features to those in support using feature selection.\n\n This function modifies the estimator in-place.\n\n Parameters\n ----------\n support : array-like\n Boolean mask or list of indices (as returned by the get_support\n member of feature selectors).\n indices : bool, default=False\n Whether support is a list of indices.\n\n Returns\n -------\n self : object\n DictVectorizer class instance.\n\n Examples\n --------\n >>> from sklearn.feature_extraction import DictVectorizer\n >>> from sklearn.feature_selection import SelectKBest, chi2\n >>> v = DictVectorizer()\n >>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]\n >>> X = v.fit_transform(D)\n >>> support = SelectKBest(chi2, k=2).fit(X, [0, 1])\n >>> v.get_feature_names_out()\n array(['bar', 'baz', 'foo'], ...)\n >>> v.restrict(support.get_support())\n DictVectorizer()\n >>> v.get_feature_names_out()\n array(['bar', 'foo'], ...)\n " }, @@ -103670,7 +103122,7 @@ "qname": "sklearn.feature_extraction._dict_vectorizer.DictVectorizer.transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -103683,7 +103135,7 @@ "qname": "sklearn.feature_extraction._dict_vectorizer.DictVectorizer.transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Mapping or iterable over Mappings of shape (n_samples,)", "description": "Dict(s) or Mapping(s) from feature names (arbitrary Python\nobjects) to feature values (strings or convertible to dtype)." @@ -103704,7 +103156,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Transform feature->value dicts to array or sparse matrix.\n\nNamed features not encountered during fit or fit_transform will be\nsilently ignored.", "docstring": "Transform feature->value dicts to array or sparse matrix.\n\n Named features not encountered during fit or fit_transform will be\n silently ignored.\n\n Parameters\n ----------\n X : Mapping or iterable over Mappings of shape (n_samples,)\n Dict(s) or Mapping(s) from feature names (arbitrary Python\n objects) to feature values (strings or convertible to dtype).\n\n Returns\n -------\n Xa : {array, sparse matrix}\n Feature vectors; always 2-d.\n " }, @@ -103745,7 +103197,7 @@ "qname": "sklearn.feature_extraction._hash.FeatureHasher.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -103758,7 +103210,7 @@ "qname": "sklearn.feature_extraction._hash.FeatureHasher.__init__.n_features", "default_value": "2**20", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=2**20", "description": "The number of features (columns) in the output matrices. Small numbers\nof features are likely to cause hash collisions, but large numbers\nwill cause larger coefficient dimensions in linear learners." 
@@ -103774,7 +103226,7 @@ "qname": "sklearn.feature_extraction._hash.FeatureHasher.__init__.input_type", "default_value": "'dict'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str, default='dict'", "description": "Choose a string from {'dict', 'pair', 'string'}.\nEither \"dict\" (the default) to accept dictionaries over\n(feature_name, value); \"pair\" to accept pairs of (feature_name, value);\nor \"string\" to accept single strings.\nfeature_name should be a string, while value should be a number.\nIn the case of \"string\", a value of 1 is implied.\nThe feature_name is hashed to find the appropriate column for the\nfeature. The value's sign might be flipped in the output (but see\nnon_negative, below)." @@ -103790,7 +103242,7 @@ "qname": "sklearn.feature_extraction._hash.FeatureHasher.__init__.dtype", "default_value": "np.float64", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "numpy dtype, default=np.float64", "description": "The type of feature values. Passed to scipy.sparse matrix constructors\nas the dtype argument. Do not set this to bool, np.boolean or any\nunsigned integer type." @@ -103806,7 +103258,7 @@ "qname": "sklearn.feature_extraction._hash.FeatureHasher.__init__.alternate_sign", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "When True, an alternating sign is added to the features as to\napproximately conserve the inner product in the hashed space even for\nsmall n_features. This approach is similar to sparse random projection.\n\n.. versionchanged:: 0.19\n ``alternate_sign`` replaces the now deprecated ``non_negative``\n parameter." @@ -103818,7 +103270,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -103897,7 +103349,7 @@ "qname": "sklearn.feature_extraction._hash.FeatureHasher.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -103910,7 +103362,7 @@ "qname": "sklearn.feature_extraction._hash.FeatureHasher.fit.X", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present here for API consistency by convention." @@ -103926,7 +103378,7 @@ "qname": "sklearn.feature_extraction._hash.FeatureHasher.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present here for API consistency by convention." @@ -103938,7 +103390,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "No-op.\n\nThis method doesn't do anything. It exists purely for compatibility\nwith the scikit-learn transformer API.", "docstring": "No-op.\n\n This method doesn't do anything. 
It exists purely for compatibility\n with the scikit-learn transformer API.\n\n Parameters\n ----------\n X : Ignored\n Not used, present here for API consistency by convention.\n\n y : Ignored\n Not used, present here for API consistency by convention.\n\n Returns\n -------\n self : object\n FeatureHasher class instance.\n " }, @@ -103954,7 +103406,7 @@ "qname": "sklearn.feature_extraction._hash.FeatureHasher.transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -103967,7 +103419,7 @@ "qname": "sklearn.feature_extraction._hash.FeatureHasher.transform.raw_X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "iterable over iterable over raw features, length = n_samples", "description": "Samples. Each sample must be iterable an (e.g., a list or tuple)\ncontaining/generating feature names (and optionally values, see\nthe input_type constructor argument) which will be hashed.\nraw_X need not support the len function, so it can be the result\nof a generator; n_samples is determined on the fly." @@ -103988,7 +103440,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Transform a sequence of instances to a scipy.sparse matrix.", "docstring": "Transform a sequence of instances to a scipy.sparse matrix.\n\n Parameters\n ----------\n raw_X : iterable over iterable over raw features, length = n_samples\n Samples. Each sample must be iterable an (e.g., a list or tuple)\n containing/generating feature names (and optionally values, see\n the input_type constructor argument) which will be hashed.\n raw_X need not support the len function, so it can be the result\n of a generator; n_samples is determined on the fly.\n\n Returns\n -------\n X : sparse matrix of shape (n_samples, n_features)\n Feature matrix, for use with estimators or further transformers.\n " }, @@ -105090,7 +104542,7 @@ }, "type": { "kind": "EnumType", - "values": ["content", "file", "filename"] + "values": ["file", "content", "filename"] } }, { @@ -105138,7 +104590,7 @@ }, "type": { "kind": "EnumType", - "values": ["ascii", "unicode"] + "values": ["unicode", "ascii"] } }, { @@ -105262,7 +104714,7 @@ "types": [ { "kind": "EnumType", - "values": ["word", "char", "char_wb"] + "values": ["char_wb", "char", "word"] }, { "kind": "NamedType", @@ -105964,7 +105416,7 @@ }, "type": { "kind": "EnumType", - "values": ["content", "file", "filename"] + "values": ["file", "content", "filename"] } }, { @@ -106012,7 +105464,7 @@ }, "type": { "kind": "EnumType", - "values": ["ascii", "unicode"] + "values": ["unicode", "ascii"] } }, { @@ -106136,7 +105588,7 @@ "types": [ { "kind": "EnumType", - "values": ["word", "char", "char_wb"] + "values": ["char_wb", "char", "word"] }, { "kind": "NamedType", @@ -106861,7 +106313,7 @@ }, "type": { "kind": "EnumType", - "values": ["content", "file", "filename"] + "values": ["file", "content", "filename"] } }, { @@ -106909,7 +106361,7 @@ }, "type": { "kind": "EnumType", - "values": ["ascii", "unicode"] + "values": ["unicode", "ascii"] } }, { @@ -106976,7 +106428,7 @@ "types": [ { "kind": "EnumType", - "values": ["word", "char", "char_wb"] + "values": ["char_wb", "char", "word"] }, { "kind": "NamedType", @@ -108334,7 +107786,7 @@ "qname": "sklearn.feature_selection._base.SelectorMixin.get_feature_names_out.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, 
"docstring": { "type": "", "description": "" @@ -108347,7 +107799,7 @@ "qname": "sklearn.feature_selection._base.SelectorMixin.get_feature_names_out.input_features", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of str or None, default=None", "description": "Input features.\n\n- If `input_features` is `None`, then `feature_names_in_` is\n used as feature names in. If `feature_names_in_` is not defined,\n then the following input feature names are generated:\n `[\"x0\", \"x1\", ..., \"x(n_features_in_ - 1)\"]`.\n- If `input_features` is an array-like, then `input_features` must\n match `feature_names_in_` if `feature_names_in_` is defined." @@ -108368,7 +107820,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Mask feature names according to selected features.", "docstring": "Mask feature names according to selected features.\n\n Parameters\n ----------\n input_features : array-like of str or None, default=None\n Input features.\n\n - If `input_features` is `None`, then `feature_names_in_` is\n used as feature names in. If `feature_names_in_` is not defined,\n then the following input feature names are generated:\n `[\"x0\", \"x1\", ..., \"x(n_features_in_ - 1)\"]`.\n - If `input_features` is an array-like, then `input_features` must\n match `feature_names_in_` if `feature_names_in_` is defined.\n\n Returns\n -------\n feature_names_out : ndarray of str objects\n Transformed feature names.\n " }, @@ -108384,7 +107836,7 @@ "qname": "sklearn.feature_selection._base.SelectorMixin.get_support.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -108397,7 +107849,7 @@ "qname": "sklearn.feature_selection._base.SelectorMixin.get_support.indices", "default_value": "False", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "If True, the return value will be an array of integers, rather\nthan a boolean mask." @@ -108409,7 +107861,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Get a mask, or integer index, of the features selected.", "docstring": "\n Get a mask, or integer index, of the features selected.\n\n Parameters\n ----------\n indices : bool, default=False\n If True, the return value will be an array of integers, rather\n than a boolean mask.\n\n Returns\n -------\n support : array\n An index that selects the retained features from a feature vector.\n If `indices` is False, this is a boolean array of shape\n [# input features], in which an element is True iff its\n corresponding feature is selected for retention. If `indices` is\n True, this is an integer array of shape [# output features] whose\n values are indices into the input feature vector.\n " }, @@ -108425,7 +107877,7 @@ "qname": "sklearn.feature_selection._base.SelectorMixin.inverse_transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -108438,7 +107890,7 @@ "qname": "sklearn.feature_selection._base.SelectorMixin.inverse_transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array of shape [n_samples, n_selected_features]", "description": "The input samples." 
@@ -108459,7 +107911,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Reverse the transformation operation.", "docstring": "Reverse the transformation operation.\n\n Parameters\n ----------\n X : array of shape [n_samples, n_selected_features]\n The input samples.\n\n Returns\n -------\n X_r : array of shape [n_samples, n_original_features]\n `X` with columns of zeros inserted where features would have\n been removed by :meth:`transform`.\n " }, @@ -108475,7 +107927,7 @@ "qname": "sklearn.feature_selection._base.SelectorMixin.transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -108488,7 +107940,7 @@ "qname": "sklearn.feature_selection._base.SelectorMixin.transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array of shape [n_samples, n_features]", "description": "The input samples." @@ -108509,7 +107961,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Reduce X to the selected features.", "docstring": "Reduce X to the selected features.\n\n Parameters\n ----------\n X : array of shape [n_samples, n_features]\n The input samples.\n\n Returns\n -------\n X_r : array of shape [n_samples, n_selected_features]\n The input samples with only the selected features.\n " }, @@ -108577,7 +108029,7 @@ }, "type": { "kind": "EnumType", - "values": ["norm", "square"] + "values": ["square", "norm"] } }, { @@ -108614,7 +108066,7 @@ "qname": "sklearn.feature_selection._from_model.SelectFromModel.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -108627,7 +108079,7 @@ "qname": "sklearn.feature_selection._from_model.SelectFromModel.__init__.estimator", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "object", "description": "The base estimator from which the transformer is built.\nThis can be both a fitted (if ``prefit`` is set to True)\nor a non-fitted estimator. The estimator should have a\n``feature_importances_`` or ``coef_`` attribute after fitting.\nOtherwise, the ``importance_getter`` parameter should be used." @@ -108643,7 +108095,7 @@ "qname": "sklearn.feature_selection._from_model.SelectFromModel.__init__.threshold", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str or float, default=None", "description": "The threshold value to use for feature selection. Features whose\nimportance is greater or equal are kept while the others are\ndiscarded. If \"median\" (resp. \"mean\"), then the ``threshold`` value is\nthe median (resp. the mean) of the feature importances. A scaling\nfactor (e.g., \"1.25*mean\") may also be used. If None and if the\nestimator has a parameter penalty set to l1, either explicitly\nor implicitly (e.g, Lasso), the threshold used is 1e-5.\nOtherwise, \"mean\" is used by default." 
@@ -108668,7 +108120,7 @@ "qname": "sklearn.feature_selection._from_model.SelectFromModel.__init__.prefit", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Whether a prefit model is expected to be passed into the constructor\ndirectly or not.\nIf `True`, `estimator` must be a fitted estimator.\nIf `False`, `estimator` is fitted and updated by calling\n`fit` and `partial_fit`, respectively." @@ -108684,7 +108136,7 @@ "qname": "sklearn.feature_selection._from_model.SelectFromModel.__init__.norm_order", "default_value": "1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "non-zero int, inf, -inf, default=1", "description": "Order of the norm used to filter the vectors of coefficients below\n``threshold`` in the case where the ``coef_`` attribute of the\nestimator is of dimension 2." @@ -108713,7 +108165,7 @@ "qname": "sklearn.feature_selection._from_model.SelectFromModel.__init__.max_features", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, callable, default=None", "description": "The maximum number of features to select.\n\n- If an integer, then it specifies the maximum number of features to\n allow.\n- If a callable, then it specifies how to calculate the maximum number of\n features allowed by using the output of `max_feaures(X)`.\n- If `None`, then all features are kept.\n\nTo only select based on ``max_features``, set ``threshold=-np.inf``.\n\n.. versionadded:: 0.20\n.. versionchanged:: 1.1\n `max_features` accepts a callable." @@ -108738,7 +108190,7 @@ "qname": "sklearn.feature_selection._from_model.SelectFromModel.__init__.importance_getter", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str or callable, default='auto'", "description": "If 'auto', uses the feature importance either through a ``coef_``\nattribute or ``feature_importances_`` attribute of estimator.\n\nAlso accepts a string that specifies an attribute name/path\nfor extracting feature importance (implemented with `attrgetter`).\nFor example, give `regressor_.coef_` in case of\n:class:`~sklearn.compose.TransformedTargetRegressor` or\n`named_steps.clf.feature_importances_` in case of\n:class:`~sklearn.pipeline.Pipeline` with its last step named `clf`.\n\nIf `callable`, overrides the default feature importance getter.\nThe callable is passed with the fitted estimator and it should\nreturn importance for each feature.\n\n.. versionadded:: 0.24" @@ -108759,7 +108211,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -108863,7 +108315,7 @@ "qname": "sklearn.feature_selection._from_model.SelectFromModel.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -108876,7 +108328,7 @@ "qname": "sklearn.feature_selection._from_model.SelectFromModel.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "The training input samples." 
@@ -108892,7 +108344,7 @@ "qname": "sklearn.feature_selection._from_model.SelectFromModel.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,), default=None", "description": "The target values (integers that correspond to classes in\nclassification, real numbers in regression)." @@ -108904,7 +108356,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the SelectFromModel meta-transformer.", "docstring": "Fit the SelectFromModel meta-transformer.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The training input samples.\n\n y : array-like of shape (n_samples,), default=None\n The target values (integers that correspond to classes in\n classification, real numbers in regression).\n\n **fit_params : dict\n Other estimator specific parameters.\n\n Returns\n -------\n self : object\n Fitted estimator.\n " }, @@ -108920,7 +108372,7 @@ "qname": "sklearn.feature_selection._from_model.SelectFromModel.n_features_in_.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -108929,7 +108381,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Number of features seen during `fit`.", "docstring": "Number of features seen during `fit`." }, @@ -108945,7 +108397,7 @@ "qname": "sklearn.feature_selection._from_model.SelectFromModel.partial_fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -108958,7 +108410,7 @@ "qname": "sklearn.feature_selection._from_model.SelectFromModel.partial_fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "The training input samples." @@ -108974,7 +108426,7 @@ "qname": "sklearn.feature_selection._from_model.SelectFromModel.partial_fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,), default=None", "description": "The target values (integers that correspond to classes in\nclassification, real numbers in regression)." @@ -108986,7 +108438,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the SelectFromModel meta-transformer only once.", "docstring": "Fit the SelectFromModel meta-transformer only once.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The training input samples.\n\n y : array-like of shape (n_samples,), default=None\n The target values (integers that correspond to classes in\n classification, real numbers in regression).\n\n **fit_params : dict\n Other estimator specific parameters.\n\n Returns\n -------\n self : object\n Fitted estimator.\n " }, @@ -109002,7 +108454,7 @@ "qname": "sklearn.feature_selection._from_model.SelectFromModel.threshold_.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -109011,7 +108463,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Threshold value used for feature selection.", "docstring": "Threshold value used for feature selection." 
}, @@ -109796,7 +109248,7 @@ "qname": "sklearn.feature_selection._rfe.RFE.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -109809,7 +109261,7 @@ "qname": "sklearn.feature_selection._rfe.RFE.__init__.estimator", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "``Estimator`` instance", "description": "A supervised learning estimator with a ``fit`` method that provides\ninformation about feature importance\n(e.g. `coef_`, `feature_importances_`)." @@ -109825,7 +109277,7 @@ "qname": "sklearn.feature_selection._rfe.RFE.__init__.n_features_to_select", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or float, default=None", "description": "The number of features to select. If `None`, half of the features are\nselected. If integer, the parameter is the absolute number of features\nto select. If float between 0 and 1, it is the fraction of features to\nselect.\n\n.. versionchanged:: 0.24\n Added float values for fractions." @@ -109850,7 +109302,7 @@ "qname": "sklearn.feature_selection._rfe.RFE.__init__.step", "default_value": "1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or float, default=1", "description": "If greater than or equal to 1, then ``step`` corresponds to the\n(integer) number of features to remove at each iteration.\nIf within (0.0, 1.0), then ``step`` corresponds to the percentage\n(rounded down) of features to remove at each iteration." @@ -109875,7 +109327,7 @@ "qname": "sklearn.feature_selection._rfe.RFE.__init__.verbose", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=0", "description": "Controls verbosity of output." @@ -109891,7 +109343,7 @@ "qname": "sklearn.feature_selection._rfe.RFE.__init__.importance_getter", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str or callable, default='auto'", "description": "If 'auto', uses the feature importance either through a `coef_`\nor `feature_importances_` attributes of estimator.\n\nAlso accepts a string that specifies an attribute name/path\nfor extracting feature importance (implemented with `attrgetter`).\nFor example, give `regressor_.coef_` in case of\n:class:`~sklearn.compose.TransformedTargetRegressor` or\n`named_steps.clf.feature_importances_` in case of\nclass:`~sklearn.pipeline.Pipeline` with its last step named `clf`.\n\nIf `callable`, overrides the default feature importance getter.\nThe callable is passed with the fitted estimator and it should\nreturn importance for each feature.\n\n.. 
versionadded:: 0.24" @@ -109912,7 +109364,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -110067,7 +109519,7 @@ "qname": "sklearn.feature_selection._rfe.RFE.classes_.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -110076,7 +109528,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Classes labels available when `estimator` is a classifier.", "docstring": "Classes labels available when `estimator` is a classifier.\n\n Returns\n -------\n ndarray of shape (n_classes,)\n " }, @@ -110092,7 +109544,7 @@ "qname": "sklearn.feature_selection._rfe.RFE.decision_function.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -110105,7 +109557,7 @@ "qname": "sklearn.feature_selection._rfe.RFE.decision_function.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like or sparse matrix} of shape (n_samples, n_features)", "description": "The input samples. Internally, it will be converted to\n``dtype=np.float32`` and if a sparse matrix is provided\nto a sparse ``csr_matrix``." @@ -110126,7 +109578,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Compute the decision function of ``X``.", "docstring": "Compute the decision function of ``X``.\n\n Parameters\n ----------\n X : {array-like or sparse matrix} of shape (n_samples, n_features)\n The input samples. Internally, it will be converted to\n ``dtype=np.float32`` and if a sparse matrix is provided\n to a sparse ``csr_matrix``.\n\n Returns\n -------\n score : array, shape = [n_samples, n_classes] or [n_samples]\n The decision function of the input samples. The order of the\n classes corresponds to that in the attribute :term:`classes_`.\n Regression and binary classification produce an array of shape\n [n_samples].\n " }, @@ -110142,7 +109594,7 @@ "qname": "sklearn.feature_selection._rfe.RFE.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -110155,7 +109607,7 @@ "qname": "sklearn.feature_selection._rfe.RFE.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The training input samples." @@ -110180,7 +109632,7 @@ "qname": "sklearn.feature_selection._rfe.RFE.fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "The target values." 
@@ -110192,7 +109644,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the RFE model and then the underlying estimator on the selected features.", "docstring": "Fit the RFE model and then the underlying estimator on the selected features.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The training input samples.\n\n y : array-like of shape (n_samples,)\n The target values.\n\n **fit_params : dict\n Additional parameters passed to the `fit` method of the underlying\n estimator.\n\n Returns\n -------\n self : object\n Fitted estimator.\n " }, @@ -110208,7 +109660,7 @@ "qname": "sklearn.feature_selection._rfe.RFE.predict.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -110221,7 +109673,7 @@ "qname": "sklearn.feature_selection._rfe.RFE.predict.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array of shape [n_samples, n_features]", "description": "The input samples." @@ -110242,7 +109694,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Reduce X to the selected features and then predict using the underlying estimator.", "docstring": "Reduce X to the selected features and then predict using the underlying estimator.\n\n Parameters\n ----------\n X : array of shape [n_samples, n_features]\n The input samples.\n\n Returns\n -------\n y : array of shape [n_samples]\n The predicted target values.\n " }, @@ -110258,7 +109710,7 @@ "qname": "sklearn.feature_selection._rfe.RFE.predict_log_proba.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -110271,7 +109723,7 @@ "qname": "sklearn.feature_selection._rfe.RFE.predict_log_proba.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array of shape [n_samples, n_features]", "description": "The input samples." @@ -110292,7 +109744,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict class log-probabilities for X.", "docstring": "Predict class log-probabilities for X.\n\n Parameters\n ----------\n X : array of shape [n_samples, n_features]\n The input samples.\n\n Returns\n -------\n p : array of shape (n_samples, n_classes)\n The class log-probabilities of the input samples. The order of the\n classes corresponds to that in the attribute :term:`classes_`.\n " }, @@ -110308,7 +109760,7 @@ "qname": "sklearn.feature_selection._rfe.RFE.predict_proba.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -110321,7 +109773,7 @@ "qname": "sklearn.feature_selection._rfe.RFE.predict_proba.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like or sparse matrix} of shape (n_samples, n_features)", "description": "The input samples. Internally, it will be converted to\n``dtype=np.float32`` and if a sparse matrix is provided\nto a sparse ``csr_matrix``." 
@@ -110342,7 +109794,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict class probabilities for X.", "docstring": "Predict class probabilities for X.\n\n Parameters\n ----------\n X : {array-like or sparse matrix} of shape (n_samples, n_features)\n The input samples. Internally, it will be converted to\n ``dtype=np.float32`` and if a sparse matrix is provided\n to a sparse ``csr_matrix``.\n\n Returns\n -------\n p : array of shape (n_samples, n_classes)\n The class probabilities of the input samples. The order of the\n classes corresponds to that in the attribute :term:`classes_`.\n " }, @@ -110358,7 +109810,7 @@ "qname": "sklearn.feature_selection._rfe.RFE.score.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -110371,7 +109823,7 @@ "qname": "sklearn.feature_selection._rfe.RFE.score.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array of shape [n_samples, n_features]", "description": "The input samples." @@ -110396,7 +109848,7 @@ "qname": "sklearn.feature_selection._rfe.RFE.score.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array of shape [n_samples]", "description": "The target values." @@ -110408,7 +109860,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Reduce X to the selected features and return the score of the underlying estimator.", "docstring": "Reduce X to the selected features and return the score of the underlying estimator.\n\n Parameters\n ----------\n X : array of shape [n_samples, n_features]\n The input samples.\n\n y : array of shape [n_samples]\n The target values.\n\n **fit_params : dict\n Parameters to pass to the `score` method of the underlying\n estimator.\n\n .. versionadded:: 1.0\n\n Returns\n -------\n score : float\n Score of the underlying base estimator computed with the selected\n features returned by `rfe.transform(X)` and `y`.\n " }, @@ -110424,7 +109876,7 @@ "qname": "sklearn.feature_selection._rfe.RFECV.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -110437,7 +109889,7 @@ "qname": "sklearn.feature_selection._rfe.RFECV.__init__.estimator", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "``Estimator`` instance", "description": "A supervised learning estimator with a ``fit`` method that provides\ninformation about feature importance either through a ``coef_``\nattribute or through a ``feature_importances_`` attribute." @@ -110453,7 +109905,7 @@ "qname": "sklearn.feature_selection._rfe.RFECV.__init__.step", "default_value": "1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or float, default=1", "description": "If greater than or equal to 1, then ``step`` corresponds to the\n(integer) number of features to remove at each iteration.\nIf within (0.0, 1.0), then ``step`` corresponds to the percentage\n(rounded down) of features to remove at each iteration.\nNote that the last iteration may remove fewer than ``step`` features in\norder to reach ``min_features_to_select``." 
@@ -110478,7 +109930,7 @@ "qname": "sklearn.feature_selection._rfe.RFECV.__init__.min_features_to_select", "default_value": "1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=1", "description": "The minimum number of features to be selected. This number of features\nwill always be scored, even if the difference between the original\nfeature count and ``min_features_to_select`` isn't divisible by\n``step``.\n\n.. versionadded:: 0.20" @@ -110494,7 +109946,7 @@ "qname": "sklearn.feature_selection._rfe.RFECV.__init__.cv", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, cross-validation generator or an iterable, default=None", "description": "Determines the cross-validation splitting strategy.\nPossible inputs for cv are:\n\n- None, to use the default 5-fold cross-validation,\n- integer, to specify the number of folds.\n- :term:`CV splitter`,\n- An iterable yielding (train, test) splits as arrays of indices.\n\nFor integer/None inputs, if ``y`` is binary or multiclass,\n:class:`~sklearn.model_selection.StratifiedKFold` is used. If the\nestimator is a classifier or if ``y`` is neither binary nor multiclass,\n:class:`~sklearn.model_selection.KFold` is used.\n\nRefer :ref:`User Guide ` for the various\ncross-validation strategies that can be used here.\n\n.. versionchanged:: 0.22\n ``cv`` default value of None changed from 3-fold to 5-fold." @@ -110523,7 +109975,7 @@ "qname": "sklearn.feature_selection._rfe.RFECV.__init__.scoring", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str, callable or None, default=None", "description": "A string (see model evaluation documentation) or\na scorer callable object / function with signature\n``scorer(estimator, X, y)``." @@ -110552,7 +110004,7 @@ "qname": "sklearn.feature_selection._rfe.RFECV.__init__.verbose", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=0", "description": "Controls verbosity of output." @@ -110568,7 +110020,7 @@ "qname": "sklearn.feature_selection._rfe.RFECV.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or None, default=None", "description": "Number of cores to run in parallel while fitting across folds.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details.\n\n.. versionadded:: 0.18" @@ -110593,7 +110045,7 @@ "qname": "sklearn.feature_selection._rfe.RFECV.__init__.importance_getter", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str or callable, default='auto'", "description": "If 'auto', uses the feature importance either through a `coef_`\nor `feature_importances_` attributes of estimator.\n\nAlso accepts a string that specifies an attribute name/path\nfor extracting feature importance.\nFor example, give `regressor_.coef_` in case of\n:class:`~sklearn.compose.TransformedTargetRegressor` or\n`named_steps.clf.feature_importances_` in case of\n:class:`~sklearn.pipeline.Pipeline` with its last step named `clf`.\n\nIf `callable`, overrides the default feature importance getter.\nThe callable is passed with the fitted estimator and it should\nreturn importance for each feature.\n\n.. 
versionadded:: 0.24" @@ -110614,7 +110066,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -110630,7 +110082,7 @@ "qname": "sklearn.feature_selection._rfe.RFECV.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -110643,7 +110095,7 @@ "qname": "sklearn.feature_selection._rfe.RFECV.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Training vector, where `n_samples` is the number of samples and\n`n_features` is the total number of features." @@ -110668,7 +110120,7 @@ "qname": "sklearn.feature_selection._rfe.RFECV.fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "Target values (integers for classification, real numbers for\nregression)." @@ -110684,7 +110136,7 @@ "qname": "sklearn.feature_selection._rfe.RFECV.fit.groups", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,) or None, default=None", "description": "Group labels for the samples used while splitting the dataset into\ntrain/test set. Only used in conjunction with a \"Group\" :term:`cv`\ninstance (e.g., :class:`~sklearn.model_selection.GroupKFold`).\n\n.. versionadded:: 0.20" @@ -110705,7 +110157,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the RFE model and automatically tune the number of selected features.", "docstring": "Fit the RFE model and automatically tune the number of selected features.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training vector, where `n_samples` is the number of samples and\n `n_features` is the total number of features.\n\n y : array-like of shape (n_samples,)\n Target values (integers for classification, real numbers for\n regression).\n\n groups : array-like of shape (n_samples,) or None, default=None\n Group labels for the samples used while splitting the dataset into\n train/test set. Only used in conjunction with a \"Group\" :term:`cv`\n instance (e.g., :class:`~sklearn.model_selection.GroupKFold`).\n\n .. versionadded:: 0.20\n\n Returns\n -------\n self : object\n Fitted estimator.\n " }, @@ -110724,7 +110176,7 @@ "qname": "sklearn.feature_selection._rfe.RFECV.grid_scores_.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -110733,7 +110185,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -110877,7 +110329,7 @@ "qname": "sklearn.feature_selection._sequential.SequentialFeatureSelector.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -110890,7 +110342,7 @@ "qname": "sklearn.feature_selection._sequential.SequentialFeatureSelector.__init__.estimator", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "estimator instance", "description": "An unfitted estimator." 
@@ -110906,7 +110358,7 @@ "qname": "sklearn.feature_selection._sequential.SequentialFeatureSelector.__init__.n_features_to_select", "default_value": "'warn'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "\"auto\", int or float, default='warn'", "description": "If `\"auto\"`, the behaviour depends on the `tol` parameter:\n\n- if `tol` is not `None`, then features are selected until the score\n improvement does not exceed `tol`.\n- otherwise, half of the features are selected.\n\nIf integer, the parameter is the absolute number of features to select.\nIf float between 0 and 1, it is the fraction of features to select.\n\n.. versionadded:: 1.1\n The option `\"auto\"` was added in version 1.1.\n\n.. deprecated:: 1.1\n The default changed from `None` to `\"warn\"` in 1.1 and will become\n `\"auto\"` in 1.3. `None` and `'warn'` will be removed in 1.3.\n To keep the same behaviour as `None`, set\n `n_features_to_select=\"auto\" and `tol=None`." @@ -110935,7 +110387,7 @@ "qname": "sklearn.feature_selection._sequential.SequentialFeatureSelector.__init__.tol", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=None", "description": "If the score is not incremented by at least `tol` between two\nconsecutive feature additions or removals, stop adding or removing.\n`tol` is enabled only when `n_features_to_select` is `\"auto\"`.\n\n.. versionadded:: 1.1" @@ -110951,14 +110403,14 @@ "qname": "sklearn.feature_selection._sequential.SequentialFeatureSelector.__init__.direction", "default_value": "'forward'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'forward', 'backward'}, default='forward'", "description": "Whether to perform forward selection or backward selection." }, "type": { "kind": "EnumType", - "values": ["forward", "backward"] + "values": ["backward", "forward"] } }, { @@ -110967,7 +110419,7 @@ "qname": "sklearn.feature_selection._sequential.SequentialFeatureSelector.__init__.scoring", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str, callable, list/tuple or dict, default=None", "description": "A single str (see :ref:`scoring_parameter`) or a callable\n(see :ref:`scoring`) to evaluate the predictions on the test set.\n\nNOTE that when using custom scorers, each scorer should return a single\nvalue. Metric functions returning a list/array of values can be wrapped\ninto multiple scorers that return one value each.\n\nIf None, the estimator's score method is used." @@ -111000,7 +110452,7 @@ "qname": "sklearn.feature_selection._sequential.SequentialFeatureSelector.__init__.cv", "default_value": "5", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, cross-validation generator or an iterable, default=None", "description": "Determines the cross-validation splitting strategy.\nPossible inputs for cv are:\n\n- None, to use the default 5-fold cross validation,\n- integer, to specify the number of folds in a `(Stratified)KFold`,\n- :term:`CV splitter`,\n- An iterable yielding (train, test) splits as arrays of indices.\n\nFor integer/None inputs, if the estimator is a classifier and ``y`` is\neither binary or multiclass, :class:`StratifiedKFold` is used. In all\nother cases, :class:`KFold` is used. 
These splitters are instantiated\nwith `shuffle=False` so the splits will be the same across calls.\n\nRefer :ref:`User Guide ` for the various\ncross-validation strategies that can be used here." @@ -111029,7 +110481,7 @@ "qname": "sklearn.feature_selection._sequential.SequentialFeatureSelector.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Number of jobs to run in parallel. When evaluating a new feature to\nadd or remove, the cross-validation procedure is parallel over the\nfolds.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details." @@ -111041,7 +110493,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -111184,7 +110636,7 @@ "qname": "sklearn.feature_selection._sequential.SequentialFeatureSelector.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -111197,7 +110649,7 @@ "qname": "sklearn.feature_selection._sequential.SequentialFeatureSelector.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Training vectors, where `n_samples` is the number of samples and\n`n_features` is the number of predictors." @@ -111213,7 +110665,7 @@ "qname": "sklearn.feature_selection._sequential.SequentialFeatureSelector.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,), default=None", "description": "Target values. This parameter may be ignored for\nunsupervised learning." @@ -111225,7 +110677,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Learn the features to select from X.", "docstring": "Learn the features to select from X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training vectors, where `n_samples` is the number of samples and\n `n_features` is the number of predictors.\n\n y : array-like of shape (n_samples,), default=None\n Target values. This parameter may be ignored for\n unsupervised learning.\n\n Returns\n -------\n self : object\n Returns the instance itself.\n " }, @@ -111241,7 +110693,7 @@ "qname": "sklearn.feature_selection._univariate_selection.GenericUnivariateSelect.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -111254,7 +110706,7 @@ "qname": "sklearn.feature_selection._univariate_selection.GenericUnivariateSelect.__init__.score_func", "default_value": "f_classif", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "callable, default=f_classif", "description": "Function taking two arrays X and y, and returning a pair of arrays\n(scores, pvalues). For modes 'percentile' or 'kbest' it can return\na single array scores." 
@@ -111270,14 +110722,14 @@ "qname": "sklearn.feature_selection._univariate_selection.GenericUnivariateSelect.__init__.mode", "default_value": "'percentile'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'percentile', 'k_best', 'fpr', 'fdr', 'fwe'}, default='percentile'", "description": "Feature selection mode." }, "type": { "kind": "EnumType", - "values": ["fdr", "fwe", "fpr", "k_best", "percentile"] + "values": ["k_best", "fwe", "fdr", "percentile", "fpr"] } }, { @@ -111286,7 +110738,7 @@ "qname": "sklearn.feature_selection._univariate_selection.GenericUnivariateSelect.__init__.param", "default_value": "1e-05", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float or int depending on the feature selection mode, default=1e-5", "description": "Parameter of the corresponding mode." @@ -111307,7 +110759,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -111449,7 +110901,7 @@ "qname": "sklearn.feature_selection._univariate_selection.SelectFdr.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -111462,7 +110914,7 @@ "qname": "sklearn.feature_selection._univariate_selection.SelectFdr.__init__.score_func", "default_value": "f_classif", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "callable, default=f_classif", "description": "Function taking two arrays X and y, and returning a pair of arrays\n(scores, pvalues).\nDefault is f_classif (see below \"See Also\"). The default function only\nworks with classification tasks." @@ -111478,7 +110930,7 @@ "qname": "sklearn.feature_selection._univariate_selection.SelectFdr.__init__.alpha", "default_value": "0.05", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=5e-2", "description": "The highest uncorrected p-value for features to keep." @@ -111490,7 +110942,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -111531,7 +110983,7 @@ "qname": "sklearn.feature_selection._univariate_selection.SelectFpr.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -111544,7 +110996,7 @@ "qname": "sklearn.feature_selection._univariate_selection.SelectFpr.__init__.score_func", "default_value": "f_classif", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "callable, default=f_classif", "description": "Function taking two arrays X and y, and returning a pair of arrays\n(scores, pvalues).\nDefault is f_classif (see below \"See Also\"). The default function only\nworks with classification tasks." @@ -111560,7 +111012,7 @@ "qname": "sklearn.feature_selection._univariate_selection.SelectFpr.__init__.alpha", "default_value": "0.05", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=5e-2", "description": "Features with p-values less than `alpha` are selected." 
@@ -111572,7 +111024,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -111613,7 +111065,7 @@ "qname": "sklearn.feature_selection._univariate_selection.SelectFwe.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -111626,7 +111078,7 @@ "qname": "sklearn.feature_selection._univariate_selection.SelectFwe.__init__.score_func", "default_value": "f_classif", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "callable, default=f_classif", "description": "Function taking two arrays X and y, and returning a pair of arrays\n(scores, pvalues).\nDefault is f_classif (see below \"See Also\"). The default function only\nworks with classification tasks." @@ -111642,7 +111094,7 @@ "qname": "sklearn.feature_selection._univariate_selection.SelectFwe.__init__.alpha", "default_value": "0.05", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=5e-2", "description": "The highest uncorrected p-value for features to keep." @@ -111654,7 +111106,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -111695,7 +111147,7 @@ "qname": "sklearn.feature_selection._univariate_selection.SelectKBest.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -111708,7 +111160,7 @@ "qname": "sklearn.feature_selection._univariate_selection.SelectKBest.__init__.score_func", "default_value": "f_classif", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "callable, default=f_classif", "description": "Function taking two arrays X and y, and returning a pair of arrays\n(scores, pvalues) or a single array with scores.\nDefault is f_classif (see below \"See Also\"). The default function only\nworks with classification tasks.\n\n.. versionadded:: 0.18" @@ -111724,7 +111176,7 @@ "qname": "sklearn.feature_selection._univariate_selection.SelectKBest.__init__.k", "default_value": "10", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or \"all\", default=10", "description": "Number of top features to select.\nThe \"all\" option bypasses selection, for use in a parameter search." @@ -111745,7 +111197,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -111837,7 +111289,7 @@ "qname": "sklearn.feature_selection._univariate_selection.SelectPercentile.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -111850,7 +111302,7 @@ "qname": "sklearn.feature_selection._univariate_selection.SelectPercentile.__init__.score_func", "default_value": "f_classif", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "callable, default=f_classif", "description": "Function taking two arrays X and y, and returning a pair of arrays\n(scores, pvalues) or a single array with scores.\nDefault is f_classif (see below \"See Also\"). The default function only\nworks with classification tasks.\n\n.. 
versionadded:: 0.18" @@ -111866,7 +111318,7 @@ "qname": "sklearn.feature_selection._univariate_selection.SelectPercentile.__init__.percentile", "default_value": "10", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=10", "description": "Percent of features to keep." @@ -111878,7 +111330,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -112246,7 +111698,7 @@ "results": [], "is_public": true, "description": "Compute chi-squared stats between each non-negative feature and class.\n\nThis score can be used to select the n_features features with the\nhighest values for the test chi-squared statistic from X, which must\ncontain only non-negative features such as booleans or frequencies\n(e.g., term counts in document classification), relative to the classes.\n\nRecall that the chi-square test measures dependence between stochastic\nvariables, so using this function \"weeds out\" the features that are the\nmost likely to be independent of class and therefore irrelevant for\nclassification.\n\nRead more in the :ref:`User Guide `.", - "docstring": "Compute chi-squared stats between each non-negative feature and class.\n\n This score can be used to select the n_features features with the\n highest values for the test chi-squared statistic from X, which must\n contain only non-negative features such as booleans or frequencies\n (e.g., term counts in document classification), relative to the classes.\n\n Recall that the chi-square test measures dependence between stochastic\n variables, so using this function \"weeds out\" the features that are the\n most likely to be independent of class and therefore irrelevant for\n classification.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Sample vectors.\n\n y : array-like of shape (n_samples,)\n Target vector (class labels).\n\n Returns\n -------\n chi2 : ndarray of shape (n_features,)\n Chi2 statistics for each feature.\n\n p_values : ndarray of shape (n_features,)\n P-values for each feature.\n\n See Also\n --------\n f_classif : ANOVA F-value between label/feature for classification tasks.\n f_regression : F-value between label/feature for regression tasks.\n\n Notes\n -----\n Complexity of this algorithm is O(n_classes * n_features).\n " + "docstring": "Compute chi-squared stats between each non-negative feature and class.\n\n This score can be used to select the n_features features with the\n highest values for the test chi-squared statistic from X, which must\n contain only non-negative features such as booleans or frequencies\n (e.g., term counts in document classification), relative to the classes.\n\n Recall that the chi-square test measures dependence between stochastic\n variables, so using this function \"weeds out\" the features that are the\n most likely to be independent of class and therefore irrelevant for\n classification.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Sample vectors.\n\n y : array-like of shape (n_samples,)\n Target vector (class labels).\n\n Returns\n -------\n chi2 : ndarray of shape (n_features,)\n Chi2 statistics for each feature.\n\n p_values : ndarray of shape (n_features,)\n P-values for each feature.\n\n Notes\n -----\n Complexity of this algorithm is O(n_classes * n_features).\n\n See Also\n --------\n f_classif : 
ANOVA F-value between label/feature for classification tasks.\n f_regression : F-value between label/feature for regression tasks.\n " }, { "id": "sklearn/sklearn.feature_selection._univariate_selection/f_classif", @@ -112309,8 +111761,8 @@ "parameters": [], "results": [], "is_public": true, - "description": "Perform a 1-way ANOVA.\n\nThe one-way ANOVA tests the null hypothesis that 2 or more groups have\nthe same population mean. The test is applied to samples from two or\nmore groups, possibly with differing sizes.\n\nRead more in the :ref:`User Guide `.", - "docstring": "Perform a 1-way ANOVA.\n\n The one-way ANOVA tests the null hypothesis that 2 or more groups have\n the same population mean. The test is applied to samples from two or\n more groups, possibly with differing sizes.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n *args : {array-like, sparse matrix}\n Sample1, sample2... The sample measurements should be given as\n arguments.\n\n Returns\n -------\n f_statistic : float\n The computed F-value of the test.\n p_value : float\n The associated p-value from the F-distribution.\n\n Notes\n -----\n The ANOVA test has important assumptions that must be satisfied in order\n for the associated p-value to be valid.\n\n 1. The samples are independent\n 2. Each sample is from a normally distributed population\n 3. The population standard deviations of the groups are all equal. This\n property is known as homoscedasticity.\n\n If these assumptions are not true for a given set of data, it may still be\n possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`_) although\n with some loss of power.\n\n The algorithm is from Heiman[2], pp.394-7.\n\n See ``scipy.stats.f_oneway`` that should give the same results while\n being less efficient.\n\n References\n ----------\n .. [1] Lowry, Richard. \"Concepts and Applications of Inferential\n Statistics\". Chapter 14.\n http://faculty.vassar.edu/lowry/ch14pt1.html\n\n .. [2] Heiman, G.W. Research Methods in Statistics. 2002.\n " + "description": "Performs a 1-way ANOVA.\n\nThe one-way ANOVA tests the null hypothesis that 2 or more groups have\nthe same population mean. The test is applied to samples from two or\nmore groups, possibly with differing sizes.\n\nRead more in the :ref:`User Guide `.", + "docstring": "Performs a 1-way ANOVA.\n\n The one-way ANOVA tests the null hypothesis that 2 or more groups have\n the same population mean. The test is applied to samples from two or\n more groups, possibly with differing sizes.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n *args : {array-like, sparse matrix}\n sample1, sample2... The sample measurements should be given as\n arguments.\n\n Returns\n -------\n f_statistic : float\n The computed F-value of the test.\n p_value : float\n The associated p-value from the F-distribution.\n\n Notes\n -----\n The ANOVA test has important assumptions that must be satisfied in order\n for the associated p-value to be valid.\n\n 1. The samples are independent\n 2. Each sample is from a normally distributed population\n 3. The population standard deviations of the groups are all equal. 
This\n property is known as homoscedasticity.\n\n If these assumptions are not true for a given set of data, it may still be\n possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`_) although\n with some loss of power.\n\n The algorithm is from Heiman[2], pp.394-7.\n\n See ``scipy.stats.f_oneway`` that should give the same results while\n being less efficient.\n\n References\n ----------\n\n .. [1] Lowry, Richard. \"Concepts and Applications of Inferential\n Statistics\". Chapter 14.\n http://faculty.vassar.edu/lowry/ch14pt1.html\n\n .. [2] Heiman, G.W. Research Methods in Statistics. 2002.\n\n " }, { "id": "sklearn/sklearn.feature_selection._univariate_selection/f_regression", @@ -112494,7 +111946,7 @@ "qname": "sklearn.feature_selection._variance_threshold.VarianceThreshold.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -112507,7 +111959,7 @@ "qname": "sklearn.feature_selection._variance_threshold.VarianceThreshold.__init__.threshold", "default_value": "0.0", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0", "description": "Features with a training-set variance lower than this threshold will\nbe removed. The default is to keep all features with non-zero variance,\ni.e. remove the features that have the same value in all samples." @@ -112519,7 +111971,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -112585,7 +112037,7 @@ "qname": "sklearn.feature_selection._variance_threshold.VarianceThreshold.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -112598,7 +112050,7 @@ "qname": "sklearn.feature_selection._variance_threshold.VarianceThreshold.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix}, shape (n_samples, n_features)", "description": "Data from which to compute variances, where `n_samples` is\nthe number of samples and `n_features` is the number of features." @@ -112623,7 +112075,7 @@ "qname": "sklearn.feature_selection._variance_threshold.VarianceThreshold.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "any, default=None", "description": "Ignored. This parameter exists only for compatibility with\nsklearn.pipeline.Pipeline." @@ -112635,7 +112087,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Learn empirical variances from X.", "docstring": "Learn empirical variances from X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n Data from which to compute variances, where `n_samples` is\n the number of samples and `n_features` is the number of features.\n\n y : any, default=None\n Ignored. 
This parameter exists only for compatibility with\n sklearn.pipeline.Pipeline.\n\n Returns\n -------\n self : object\n Returns the instance itself.\n " }, @@ -112651,7 +112103,7 @@ "qname": "sklearn.gaussian_process._gpc.GaussianProcessClassifier.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -112664,7 +112116,7 @@ "qname": "sklearn.gaussian_process._gpc.GaussianProcessClassifier.__init__.kernel", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "kernel instance, default=None", "description": "The kernel specifying the covariance function of the GP. If None is\npassed, the kernel \"1.0 * RBF(1.0)\" is used as default. Note that\nthe kernel's hyperparameters are optimized during fitting. Also kernel\ncannot be a `CompoundKernel`." @@ -112680,7 +112132,7 @@ "qname": "sklearn.gaussian_process._gpc.GaussianProcessClassifier.__init__.optimizer", "default_value": "'fmin_l_bfgs_b'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "'fmin_l_bfgs_b' or callable, default='fmin_l_bfgs_b'", "description": "Can either be one of the internally supported optimizers for optimizing\nthe kernel's parameters, specified by a string, or an externally\ndefined optimizer passed as a callable. If a callable is passed, it\nmust have the signature::\n\n def optimizer(obj_func, initial_theta, bounds):\n # * 'obj_func' is the objective function to be maximized, which\n # takes the hyperparameters theta as parameter and an\n # optional flag eval_gradient, which determines if the\n # gradient is returned additionally to the function value\n # * 'initial_theta': the initial value for theta, which can be\n # used by local optimizers\n # * 'bounds': the bounds on the values of theta\n ....\n # Returned are the best found hyperparameters theta and\n # the corresponding value of the target function.\n return theta_opt, func_min\n\nPer default, the 'L-BFGS-B' algorithm from scipy.optimize.minimize\nis used. If None is passed, the kernel's parameters are kept fixed.\nAvailable internal optimizers are::\n\n 'fmin_l_bfgs_b'" @@ -112705,7 +112157,7 @@ "qname": "sklearn.gaussian_process._gpc.GaussianProcessClassifier.__init__.n_restarts_optimizer", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=0", "description": "The number of restarts of the optimizer for finding the kernel's\nparameters which maximize the log-marginal likelihood. The first run\nof the optimizer is performed from the kernel's initial parameters,\nthe remaining ones (if any) from thetas sampled log-uniform randomly\nfrom the space of allowed theta-values. If greater than 0, all bounds\nmust be finite. Note that n_restarts_optimizer=0 implies that one\nrun is performed." @@ -112721,7 +112173,7 @@ "qname": "sklearn.gaussian_process._gpc.GaussianProcessClassifier.__init__.max_iter_predict", "default_value": "100", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=100", "description": "The maximum number of iterations in Newton's method for approximating\nthe posterior during predict. Smaller values will reduce computation\ntime at the cost of worse results." 
@@ -112737,7 +112189,7 @@ "qname": "sklearn.gaussian_process._gpc.GaussianProcessClassifier.__init__.warm_start", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "If warm-starts are enabled, the solution of the last Newton iteration\non the Laplace approximation of the posterior mode is used as\ninitialization for the next call of _posterior_mode(). This can speed\nup convergence when _posterior_mode is called several times on similar\nproblems as in hyperparameter optimization. See :term:`the Glossary\n`." @@ -112753,7 +112205,7 @@ "qname": "sklearn.gaussian_process._gpc.GaussianProcessClassifier.__init__.copy_X_train", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "If True, a persistent copy of the training data is stored in the\nobject. Otherwise, just a reference to the training data is stored,\nwhich might cause predictions to change if the data is modified\nexternally." @@ -112769,7 +112221,7 @@ "qname": "sklearn.gaussian_process._gpc.GaussianProcessClassifier.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Determines random number generation used to initialize the centers.\nPass an int for reproducible results across multiple function calls.\nSee :term:`Glossary `." @@ -112798,7 +112250,7 @@ "qname": "sklearn.gaussian_process._gpc.GaussianProcessClassifier.__init__.multi_class", "default_value": "'one_vs_rest'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'one_vs_rest', 'one_vs_one'}, default='one_vs_rest'", "description": "Specifies how multi-class classification problems are handled.\nSupported are 'one_vs_rest' and 'one_vs_one'. In 'one_vs_rest',\none binary Gaussian process classifier is fitted for each class, which\nis trained to separate this class from the rest. In 'one_vs_one', one\nbinary Gaussian process classifier is fitted for each pair of classes,\nwhich is trained to separate these two classes. The predictions of\nthese binary predictors are combined into multi-class predictions.\nNote that 'one_vs_one' does not support predicting probability\nestimates." @@ -112814,7 +112266,7 @@ "qname": "sklearn.gaussian_process._gpc.GaussianProcessClassifier.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "The number of jobs to use for the computation: the specified\nmulticlass problems are computed in parallel.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details." 
@@ -112826,7 +112278,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -112842,7 +112294,7 @@ "qname": "sklearn.gaussian_process._gpc.GaussianProcessClassifier.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -112855,7 +112307,7 @@ "qname": "sklearn.gaussian_process._gpc.GaussianProcessClassifier.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features) or list of object", "description": "Feature vectors or other representations of training data." @@ -112880,7 +112332,7 @@ "qname": "sklearn.gaussian_process._gpc.GaussianProcessClassifier.fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "Target values, must be binary." @@ -112892,7 +112344,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit Gaussian process classification model.", "docstring": "Fit Gaussian process classification model.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features) or list of object\n Feature vectors or other representations of training data.\n\n y : array-like of shape (n_samples,)\n Target values, must be binary.\n\n Returns\n -------\n self : object\n Returns an instance of self.\n " }, @@ -112908,7 +112360,7 @@ "qname": "sklearn.gaussian_process._gpc.GaussianProcessClassifier.kernel_.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -112917,7 +112369,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Return the kernel of the base estimator.", "docstring": "Return the kernel of the base estimator." }, @@ -112933,7 +112385,7 @@ "qname": "sklearn.gaussian_process._gpc.GaussianProcessClassifier.log_marginal_likelihood.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -112946,7 +112398,7 @@ "qname": "sklearn.gaussian_process._gpc.GaussianProcessClassifier.log_marginal_likelihood.theta", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_kernel_params,), default=None", "description": "Kernel hyperparameters for which the log-marginal likelihood is\nevaluated. In the case of multi-class classification, theta may\nbe the hyperparameters of the compound kernel or of an individual\nkernel. In the latter case, all individual kernel get assigned the\nsame theta values. If None, the precomputed log_marginal_likelihood\nof ``self.kernel_.theta`` is returned." @@ -112962,7 +112414,7 @@ "qname": "sklearn.gaussian_process._gpc.GaussianProcessClassifier.log_marginal_likelihood.eval_gradient", "default_value": "False", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "If True, the gradient of the log-marginal likelihood with respect\nto the kernel hyperparameters at position theta is returned\nadditionally. Note that gradient computation is not supported\nfor non-binary classification. If True, theta must not be None." 
@@ -112978,7 +112430,7 @@ "qname": "sklearn.gaussian_process._gpc.GaussianProcessClassifier.log_marginal_likelihood.clone_kernel", "default_value": "True", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "If True, the kernel attribute is copied. If False, the kernel\nattribute is modified, but may result in a performance improvement." @@ -112990,7 +112442,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Return log-marginal likelihood of theta for training data.\n\nIn the case of multi-class classification, the mean log-marginal\nlikelihood of the one-versus-rest classifiers are returned.", "docstring": "Return log-marginal likelihood of theta for training data.\n\n In the case of multi-class classification, the mean log-marginal\n likelihood of the one-versus-rest classifiers are returned.\n\n Parameters\n ----------\n theta : array-like of shape (n_kernel_params,), default=None\n Kernel hyperparameters for which the log-marginal likelihood is\n evaluated. In the case of multi-class classification, theta may\n be the hyperparameters of the compound kernel or of an individual\n kernel. In the latter case, all individual kernel get assigned the\n same theta values. If None, the precomputed log_marginal_likelihood\n of ``self.kernel_.theta`` is returned.\n\n eval_gradient : bool, default=False\n If True, the gradient of the log-marginal likelihood with respect\n to the kernel hyperparameters at position theta is returned\n additionally. Note that gradient computation is not supported\n for non-binary classification. If True, theta must not be None.\n\n clone_kernel : bool, default=True\n If True, the kernel attribute is copied. If False, the kernel\n attribute is modified, but may result in a performance improvement.\n\n Returns\n -------\n log_likelihood : float\n Log-marginal likelihood of theta for training data.\n\n log_likelihood_gradient : ndarray of shape (n_kernel_params,), optional\n Gradient of the log-marginal likelihood with respect to the kernel\n hyperparameters at position theta.\n Only returned when `eval_gradient` is True.\n " }, @@ -113006,7 +112458,7 @@ "qname": "sklearn.gaussian_process._gpc.GaussianProcessClassifier.predict.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -113019,7 +112471,7 @@ "qname": "sklearn.gaussian_process._gpc.GaussianProcessClassifier.predict.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features) or list of object", "description": "Query points where the GP is evaluated for classification." 
@@ -113040,7 +112492,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Perform classification on an array of test vectors X.", "docstring": "Perform classification on an array of test vectors X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features) or list of object\n Query points where the GP is evaluated for classification.\n\n Returns\n -------\n C : ndarray of shape (n_samples,)\n Predicted target values for X, values are from ``classes_``.\n " }, @@ -113056,7 +112508,7 @@ "qname": "sklearn.gaussian_process._gpc.GaussianProcessClassifier.predict_proba.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -113069,7 +112521,7 @@ "qname": "sklearn.gaussian_process._gpc.GaussianProcessClassifier.predict_proba.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features) or list of object", "description": "Query points where the GP is evaluated for classification." @@ -113090,7 +112542,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Return probability estimates for the test vector X.", "docstring": "Return probability estimates for the test vector X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features) or list of object\n Query points where the GP is evaluated for classification.\n\n Returns\n -------\n C : array-like of shape (n_samples, n_classes)\n Returns the probability of the samples for each class in\n the model. The columns correspond to the classes in sorted\n order, as they appear in the attribute :term:`classes_`.\n " }, @@ -113619,7 +113071,7 @@ "qname": "sklearn.gaussian_process._gpr.GaussianProcessRegressor.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -113632,7 +113084,7 @@ "qname": "sklearn.gaussian_process._gpr.GaussianProcessRegressor.__init__.kernel", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "kernel instance, default=None", "description": "The kernel specifying the covariance function of the GP. If None is\npassed, the kernel ``ConstantKernel(1.0, constant_value_bounds=\"fixed\")\n* RBF(1.0, length_scale_bounds=\"fixed\")`` is used as default. Note that\nthe kernel hyperparameters are optimized during fitting unless the\nbounds are marked as \"fixed\"." @@ -113648,7 +113100,7 @@ "qname": "sklearn.gaussian_process._gpr.GaussianProcessRegressor.__init__.alpha", "default_value": "1e-10", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float or ndarray of shape (n_samples,), default=1e-10", "description": "Value added to the diagonal of the kernel matrix during fitting.\nThis can prevent a potential numerical issue during fitting, by\nensuring that the calculated values form a positive definite matrix.\nIt can also be interpreted as the variance of additional Gaussian\nmeasurement noise on the training observations. Note that this is\ndifferent from using a `WhiteKernel`. If an array is passed, it must\nhave the same number of entries as the data used for fitting and is\nused as datapoint-dependent noise level. 
Allowing to specify the\nnoise level directly as a parameter is mainly for convenience and\nfor consistency with :class:`~sklearn.linear_model.Ridge`." @@ -113673,7 +113125,7 @@ "qname": "sklearn.gaussian_process._gpr.GaussianProcessRegressor.__init__.optimizer", "default_value": "'fmin_l_bfgs_b'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "\"fmin_l_bfgs_b\" or callable, default=\"fmin_l_bfgs_b\"", "description": "Can either be one of the internally supported optimizers for optimizing\nthe kernel's parameters, specified by a string, or an externally\ndefined optimizer passed as a callable. If a callable is passed, it\nmust have the signature::\n\n def optimizer(obj_func, initial_theta, bounds):\n # * 'obj_func': the objective function to be minimized, which\n # takes the hyperparameters theta as a parameter and an\n # optional flag eval_gradient, which determines if the\n # gradient is returned additionally to the function value\n # * 'initial_theta': the initial value for theta, which can be\n # used by local optimizers\n # * 'bounds': the bounds on the values of theta\n ....\n # Returned are the best found hyperparameters theta and\n # the corresponding value of the target function.\n return theta_opt, func_min\n\nPer default, the L-BFGS-B algorithm from `scipy.optimize.minimize`\nis used. If None is passed, the kernel's parameters are kept fixed.\nAvailable internal optimizers are: `{'fmin_l_bfgs_b'}`." @@ -113698,7 +113150,7 @@ "qname": "sklearn.gaussian_process._gpr.GaussianProcessRegressor.__init__.n_restarts_optimizer", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=0", "description": "The number of restarts of the optimizer for finding the kernel's\nparameters which maximize the log-marginal likelihood. The first run\nof the optimizer is performed from the kernel's initial parameters,\nthe remaining ones (if any) from thetas sampled log-uniform randomly\nfrom the space of allowed theta-values. If greater than 0, all bounds\nmust be finite. Note that `n_restarts_optimizer == 0` implies that one\nrun is performed." @@ -113714,7 +113166,7 @@ "qname": "sklearn.gaussian_process._gpr.GaussianProcessRegressor.__init__.normalize_y", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Whether or not to normalize the target values `y` by removing the mean\nand scaling to unit-variance. This is recommended for cases where\nzero-mean, unit-variance priors are used. Note that, in this\nimplementation, the normalisation is reversed before the GP predictions\nare reported.\n\n.. versionchanged:: 0.23" @@ -113730,7 +113182,7 @@ "qname": "sklearn.gaussian_process._gpr.GaussianProcessRegressor.__init__.copy_X_train", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "If True, a persistent copy of the training data is stored in the\nobject. Otherwise, just a reference to the training data is stored,\nwhich might cause predictions to change if the data is modified\nexternally." 
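The optimizer docstring above spells out the required callable signature; a hedged sketch of such a callable, wrapped around scipy.optimize.minimize (the function body is an assumption, not scikit-learn's internal implementation):

    from scipy.optimize import minimize
    from sklearn.gaussian_process import GaussianProcessRegressor

    def optimizer(obj_func, initial_theta, bounds):
        # obj_func(theta) returns the objective value and its gradient,
        # so jac=True lets L-BFGS-B consume both.
        res = minimize(obj_func, initial_theta, method="L-BFGS-B",
                       jac=True, bounds=bounds)
        return res.x, res.fun              # best theta, objective value there

    gpr = GaussianProcessRegressor(optimizer=optimizer,
                                   n_restarts_optimizer=2,
                                   normalize_y=True,
                                   random_state=0)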
@@ -113746,7 +113198,7 @@ "qname": "sklearn.gaussian_process._gpr.GaussianProcessRegressor.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Determines random number generation used to initialize the centers.\nPass an int for reproducible results across multiple function calls.\nSee :term:`Glossary `." @@ -113771,7 +113223,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -113876,7 +113328,7 @@ "qname": "sklearn.gaussian_process._gpr.GaussianProcessRegressor.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -113889,7 +113341,7 @@ "qname": "sklearn.gaussian_process._gpr.GaussianProcessRegressor.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features) or list of object", "description": "Feature vectors or other representations of training data." @@ -113914,7 +113366,7 @@ "qname": "sklearn.gaussian_process._gpr.GaussianProcessRegressor.fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,) or (n_samples, n_targets)", "description": "Target values." @@ -113926,7 +113378,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit Gaussian process regression model.", "docstring": "Fit Gaussian process regression model.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features) or list of object\n Feature vectors or other representations of training data.\n\n y : array-like of shape (n_samples,) or (n_samples, n_targets)\n Target values.\n\n Returns\n -------\n self : object\n GaussianProcessRegressor class instance.\n " }, @@ -113942,7 +113394,7 @@ "qname": "sklearn.gaussian_process._gpr.GaussianProcessRegressor.log_marginal_likelihood.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -113955,7 +113407,7 @@ "qname": "sklearn.gaussian_process._gpr.GaussianProcessRegressor.log_marginal_likelihood.theta", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_kernel_params,) default=None", "description": "Kernel hyperparameters for which the log-marginal likelihood is\nevaluated. If None, the precomputed log_marginal_likelihood\nof ``self.kernel_.theta`` is returned." @@ -113971,7 +113423,7 @@ "qname": "sklearn.gaussian_process._gpr.GaussianProcessRegressor.log_marginal_likelihood.eval_gradient", "default_value": "False", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "If True, the gradient of the log-marginal likelihood with respect\nto the kernel hyperparameters at position theta is returned\nadditionally. If True, theta must not be None." 
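A hedged sketch tying together the GaussianProcessRegressor constructor and fit parameters documented above (kernel, alpha, normalize_y, copy_X_train, random_state); the 1-D toy data is an assumption:

    import numpy as np
    from sklearn.gaussian_process import GaussianProcessRegressor
    from sklearn.gaussian_process.kernels import RBF, ConstantKernel

    rng = np.random.RandomState(0)
    X = rng.uniform(0, 5, size=(30, 1))
    y = np.sin(X[:, 0]) + rng.normal(scale=0.1, size=30)

    gpr = GaussianProcessRegressor(
        kernel=ConstantKernel(1.0) * RBF(1.0),
        alpha=1e-2,            # noise term added to the kernel diagonal
        normalize_y=True,      # remove mean / scale to unit variance internally
        copy_X_train=True,
        random_state=0,
    ).fit(X, y)
    print(gpr.log_marginal_likelihood())   # precomputed at kernel_.theta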
@@ -113987,7 +113439,7 @@ "qname": "sklearn.gaussian_process._gpr.GaussianProcessRegressor.log_marginal_likelihood.clone_kernel", "default_value": "True", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "If True, the kernel attribute is copied. If False, the kernel\nattribute is modified, but may result in a performance improvement." @@ -113999,7 +113451,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Return log-marginal likelihood of theta for training data.", "docstring": "Return log-marginal likelihood of theta for training data.\n\n Parameters\n ----------\n theta : array-like of shape (n_kernel_params,) default=None\n Kernel hyperparameters for which the log-marginal likelihood is\n evaluated. If None, the precomputed log_marginal_likelihood\n of ``self.kernel_.theta`` is returned.\n\n eval_gradient : bool, default=False\n If True, the gradient of the log-marginal likelihood with respect\n to the kernel hyperparameters at position theta is returned\n additionally. If True, theta must not be None.\n\n clone_kernel : bool, default=True\n If True, the kernel attribute is copied. If False, the kernel\n attribute is modified, but may result in a performance improvement.\n\n Returns\n -------\n log_likelihood : float\n Log-marginal likelihood of theta for training data.\n\n log_likelihood_gradient : ndarray of shape (n_kernel_params,), optional\n Gradient of the log-marginal likelihood with respect to the kernel\n hyperparameters at position theta.\n Only returned when eval_gradient is True.\n " }, @@ -114015,7 +113467,7 @@ "qname": "sklearn.gaussian_process._gpr.GaussianProcessRegressor.predict.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -114028,7 +113480,7 @@ "qname": "sklearn.gaussian_process._gpr.GaussianProcessRegressor.predict.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features) or list of object", "description": "Query points where the GP is evaluated." @@ -114053,7 +113505,7 @@ "qname": "sklearn.gaussian_process._gpr.GaussianProcessRegressor.predict.return_std", "default_value": "False", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "If True, the standard-deviation of the predictive distribution at\nthe query points is returned along with the mean." @@ -114069,7 +113521,7 @@ "qname": "sklearn.gaussian_process._gpr.GaussianProcessRegressor.predict.return_cov", "default_value": "False", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "If True, the covariance of the joint predictive distribution at\nthe query points is returned along with the mean." @@ -114081,7 +113533,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict using the Gaussian process regression model.\n\nWe can also predict based on an unfitted model by using the GP prior.\nIn addition to the mean of the predictive distribution, optionally also\nreturns its standard deviation (`return_std=True`) or covariance\n(`return_cov=True`). 
Note that at most one of the two can be requested.", "docstring": "Predict using the Gaussian process regression model.\n\n We can also predict based on an unfitted model by using the GP prior.\n In addition to the mean of the predictive distribution, optionally also\n returns its standard deviation (`return_std=True`) or covariance\n (`return_cov=True`). Note that at most one of the two can be requested.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features) or list of object\n Query points where the GP is evaluated.\n\n return_std : bool, default=False\n If True, the standard-deviation of the predictive distribution at\n the query points is returned along with the mean.\n\n return_cov : bool, default=False\n If True, the covariance of the joint predictive distribution at\n the query points is returned along with the mean.\n\n Returns\n -------\n y_mean : ndarray of shape (n_samples,) or (n_samples, n_targets)\n Mean of predictive distribution a query points.\n\n y_std : ndarray of shape (n_samples,) or (n_samples, n_targets), optional\n Standard deviation of predictive distribution at query points.\n Only returned when `return_std` is True.\n\n y_cov : ndarray of shape (n_samples, n_samples) or (n_samples, n_samples, n_targets), optional\n Covariance of joint predictive distribution a query points.\n Only returned when `return_cov` is True.\n " }, @@ -114097,7 +113549,7 @@ "qname": "sklearn.gaussian_process._gpr.GaussianProcessRegressor.sample_y.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -114110,7 +113562,7 @@ "qname": "sklearn.gaussian_process._gpr.GaussianProcessRegressor.sample_y.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples_X, n_features) or list of object", "description": "Query points where the GP is evaluated." @@ -114135,7 +113587,7 @@ "qname": "sklearn.gaussian_process._gpr.GaussianProcessRegressor.sample_y.n_samples", "default_value": "1", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=1", "description": "Number of samples drawn from the Gaussian process per query point." @@ -114151,7 +113603,7 @@ "qname": "sklearn.gaussian_process._gpr.GaussianProcessRegressor.sample_y.random_state", "default_value": "0", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=0", "description": "Determines random number generation to randomly draw samples.\nPass an int for reproducible results across multiple function\ncalls.\nSee :term:`Glossary `." 
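A hedged sketch of the predict behaviour described above: the GP prior can be queried before fitting, and either return_std or return_cov (at most one) can be requested alongside the mean; data and names are illustrative:

    import numpy as np
    from sklearn.gaussian_process import GaussianProcessRegressor

    X_query = np.linspace(0, 5, 50).reshape(-1, 1)

    prior = GaussianProcessRegressor(random_state=0)        # unfitted: GP prior
    mean_prior, std_prior = prior.predict(X_query, return_std=True)

    X = np.linspace(0, 5, 20).reshape(-1, 1)
    y = np.sin(X[:, 0])
    posterior = GaussianProcessRegressor(random_state=0).fit(X, y)
    mean_post, cov_post = posterior.predict(X_query, return_cov=True)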
@@ -114176,7 +113628,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Draw samples from Gaussian process and evaluate at X.", "docstring": "Draw samples from Gaussian process and evaluate at X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples_X, n_features) or list of object\n Query points where the GP is evaluated.\n\n n_samples : int, default=1\n Number of samples drawn from the Gaussian process per query point.\n\n random_state : int, RandomState instance or None, default=0\n Determines random number generation to randomly draw samples.\n Pass an int for reproducible results across multiple function\n calls.\n See :term:`Glossary `.\n\n Returns\n -------\n y_samples : ndarray of shape (n_samples_X, n_samples), or (n_samples_X, n_targets, n_samples)\n Values of n_samples samples drawn from Gaussian process and\n evaluated at query points.\n " }, @@ -117327,15 +116779,15 @@ { "kind": "EnumType", "values": [ - "sigmoid", - "cosine", - "linear", + "rbf", "additive_chi2", "chi2", + "laplacian", "poly", - "rbf", - "polynomial", - "laplacian" + "sigmoid", + "linear", + "cosine", + "polynomial" ] }, { @@ -118689,7 +118141,7 @@ "qname": "sklearn.impute._base.MissingIndicator.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -118702,7 +118154,7 @@ "qname": "sklearn.impute._base.MissingIndicator.__init__.missing_values", "default_value": "np.nan", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, float, str, np.nan or None, default=np.nan", "description": "The placeholder for the missing values. All occurrences of\n`missing_values` will be imputed. For pandas' dataframes with\nnullable integer dtypes with missing values, `missing_values`\nshould be set to `np.nan`, since `pd.NA` will be converted to `np.nan`." @@ -118739,7 +118191,7 @@ "qname": "sklearn.impute._base.MissingIndicator.__init__.features", "default_value": "'missing-only'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'missing-only', 'all'}, default='missing-only'", "description": "Whether the imputer mask should represent all or a subset of\nfeatures.\n\n- If `'missing-only'` (default), the imputer mask will only represent\n features containing missing values during fit time.\n- If `'all'`, the imputer mask will represent all features." @@ -118755,7 +118207,7 @@ "qname": "sklearn.impute._base.MissingIndicator.__init__.sparse", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool or 'auto', default='auto'", "description": "Whether the imputer mask format should be sparse or dense.\n\n- If `'auto'` (default), the imputer mask will be of same type as\n input.\n- If `True`, the imputer mask will be a sparse matrix.\n- If `False`, the imputer mask will be a numpy array." @@ -118780,7 +118232,7 @@ "qname": "sklearn.impute._base.MissingIndicator.__init__.error_on_new", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "If `True`, :meth:`transform` will raise an error when there are\nfeatures with missing values that have no missing values in\n:meth:`fit`. This is applicable only when `features='missing-only'`." 
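A hedged sketch of sample_y as documented above, drawing several function samples per query point (toy data assumed):

    import numpy as np
    from sklearn.gaussian_process import GaussianProcessRegressor

    X = np.linspace(0, 5, 20).reshape(-1, 1)
    y = np.sin(X[:, 0])
    gpr = GaussianProcessRegressor(random_state=0).fit(X, y)

    X_query = np.linspace(0, 5, 40).reshape(-1, 1)
    y_samples = gpr.sample_y(X_query, n_samples=3, random_state=0)
    print(y_samples.shape)                 # (n_samples_X, n_samples) == (40, 3)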
@@ -118792,7 +118244,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -119013,7 +118465,7 @@ "qname": "sklearn.impute._base.MissingIndicator.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -119026,7 +118478,7 @@ "qname": "sklearn.impute._base.MissingIndicator.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Input data, where `n_samples` is the number of samples and\n`n_features` is the number of features." @@ -119051,7 +118503,7 @@ "qname": "sklearn.impute._base.MissingIndicator.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present for API consistency by convention." @@ -119063,7 +118515,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the transformer on `X`.", "docstring": "Fit the transformer on `X`.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Input data, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n self : object\n Fitted estimator.\n " }, @@ -119079,7 +118531,7 @@ "qname": "sklearn.impute._base.MissingIndicator.fit_transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -119092,7 +118544,7 @@ "qname": "sklearn.impute._base.MissingIndicator.fit_transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The input data to complete." @@ -119117,7 +118569,7 @@ "qname": "sklearn.impute._base.MissingIndicator.fit_transform.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present for API consistency by convention." @@ -119129,7 +118581,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Generate missing values indicator for `X`.", "docstring": "Generate missing values indicator for `X`.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input data to complete.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n Xt : {ndarray, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_features_with_missing)\n The missing indicator for input data. 
The data type of `Xt`\n will be boolean.\n " }, @@ -119145,7 +118597,7 @@ "qname": "sklearn.impute._base.MissingIndicator.get_feature_names_out.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -119158,7 +118610,7 @@ "qname": "sklearn.impute._base.MissingIndicator.get_feature_names_out.input_features", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of str or None, default=None", "description": "Input features.\n\n- If `input_features` is `None`, then `feature_names_in_` is\n used as feature names in. If `feature_names_in_` is not defined,\n then the following input feature names are generated:\n `[\"x0\", \"x1\", ..., \"x(n_features_in_ - 1)\"]`.\n- If `input_features` is an array-like, then `input_features` must\n match `feature_names_in_` if `feature_names_in_` is defined." @@ -119179,7 +118631,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Get output feature names for transformation.", "docstring": "Get output feature names for transformation.\n\n Parameters\n ----------\n input_features : array-like of str or None, default=None\n Input features.\n\n - If `input_features` is `None`, then `feature_names_in_` is\n used as feature names in. If `feature_names_in_` is not defined,\n then the following input feature names are generated:\n `[\"x0\", \"x1\", ..., \"x(n_features_in_ - 1)\"]`.\n - If `input_features` is an array-like, then `input_features` must\n match `feature_names_in_` if `feature_names_in_` is defined.\n\n Returns\n -------\n feature_names_out : ndarray of str objects\n Transformed feature names.\n " }, @@ -119195,7 +118647,7 @@ "qname": "sklearn.impute._base.MissingIndicator.transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -119208,7 +118660,7 @@ "qname": "sklearn.impute._base.MissingIndicator.transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The input data to complete." @@ -119229,7 +118681,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Generate missing values indicator for `X`.", "docstring": "Generate missing values indicator for `X`.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input data to complete.\n\n Returns\n -------\n Xt : {ndarray, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_features_with_missing)\n The missing indicator for input data. The data type of `Xt`\n will be boolean.\n " }, @@ -119245,7 +118697,7 @@ "qname": "sklearn.impute._base.SimpleImputer.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -119258,7 +118710,7 @@ "qname": "sklearn.impute._base.SimpleImputer.__init__.missing_values", "default_value": "np.nan", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, float, str, np.nan, None or pandas.NA, default=np.nan", "description": "The placeholder for the missing values. All occurrences of\n`missing_values` will be imputed. 
For pandas' dataframes with\nnullable integer dtypes with missing values, `missing_values`\ncan be set to either `np.nan` or `pd.NA`." @@ -119299,7 +118751,7 @@ "qname": "sklearn.impute._base.SimpleImputer.__init__.strategy", "default_value": "'mean'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str, default='mean'", "description": "The imputation strategy.\n\n- If \"mean\", then replace missing values using the mean along\n each column. Can only be used with numeric data.\n- If \"median\", then replace missing values using the median along\n each column. Can only be used with numeric data.\n- If \"most_frequent\", then replace missing using the most frequent\n value along each column. Can be used with strings or numeric data.\n If there is more than one such value, only the smallest is returned.\n- If \"constant\", then replace missing values with fill_value. Can be\n used with strings or numeric data.\n\n.. versionadded:: 0.20\n strategy=\"constant\" for fixed value imputation." @@ -119315,7 +118767,7 @@ "qname": "sklearn.impute._base.SimpleImputer.__init__.fill_value", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str or numerical value, default=None", "description": "When strategy == \"constant\", fill_value is used to replace all\noccurrences of missing_values.\nIf left to the default, fill_value will be 0 when imputing numerical\ndata and \"missing_value\" for strings or object data types." @@ -119340,7 +118792,7 @@ "qname": "sklearn.impute._base.SimpleImputer.__init__.verbose", "default_value": "'deprecated'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=0", "description": "Controls the verbosity of the imputer.\n\n.. deprecated:: 1.1\n The 'verbose' parameter was deprecated in version 1.1 and will be\n removed in 1.3. A warning will always be raised upon the removal of\n empty columns in the future version." @@ -119356,7 +118808,7 @@ "qname": "sklearn.impute._base.SimpleImputer.__init__.copy", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "If True, a copy of X will be created. If False, imputation will\nbe done in-place whenever possible. Note that, in the following cases,\na new copy will always be made, even if `copy=False`:\n\n- If `X` is not an array of floating values;\n- If `X` is encoded as a CSR matrix;\n- If `add_indicator=True`." @@ -119372,7 +118824,7 @@ "qname": "sklearn.impute._base.SimpleImputer.__init__.add_indicator", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "If True, a :class:`MissingIndicator` transform will stack onto output\nof the imputer's transform. This allows a predictive estimator\nto account for missingness despite imputation. If a feature has no\nmissing values at fit/train time, the feature won't appear on\nthe missing indicator even if there are missing values at\ntransform/test time." 
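A hedged sketch of MissingIndicator together with the SimpleImputer constructor options recorded above (strategy="constant" with fill_value, add_indicator); the toy matrix is an assumption:

    import numpy as np
    from sklearn.impute import MissingIndicator, SimpleImputer

    X = np.array([[1.0, 2.0], [np.nan, 3.0], [7.0, np.nan]])

    indicator = MissingIndicator(features="missing-only", sparse="auto")
    mask = indicator.fit_transform(X)      # boolean mask of the missing entries

    imputer = SimpleImputer(
        missing_values=np.nan,
        strategy="constant",
        fill_value=0.0,        # only consulted when strategy == "constant"
        add_indicator=True,    # stack a MissingIndicator onto the output
    )
    Xt = imputer.fit_transform(X)          # imputed columns + indicator columns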
@@ -119384,7 +118836,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -119630,7 +119082,7 @@ "qname": "sklearn.impute._base.SimpleImputer.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -119643,7 +119095,7 @@ "qname": "sklearn.impute._base.SimpleImputer.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix}, shape (n_samples, n_features)", "description": "Input data, where `n_samples` is the number of samples and\n`n_features` is the number of features." @@ -119668,7 +119120,7 @@ "qname": "sklearn.impute._base.SimpleImputer.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present here for API consistency by convention." @@ -119680,7 +119132,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the imputer on `X`.", "docstring": "Fit the imputer on `X`.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n Input data, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n y : Ignored\n Not used, present here for API consistency by convention.\n\n Returns\n -------\n self : object\n Fitted estimator.\n " }, @@ -119696,7 +119148,7 @@ "qname": "sklearn.impute._base.SimpleImputer.get_feature_names_out.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -119709,7 +119161,7 @@ "qname": "sklearn.impute._base.SimpleImputer.get_feature_names_out.input_features", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of str or None, default=None", "description": "Input features.\n\n- If `input_features` is `None`, then `feature_names_in_` is\n used as feature names in. If `feature_names_in_` is not defined,\n then the following input feature names are generated:\n `[\"x0\", \"x1\", ..., \"x(n_features_in_ - 1)\"]`.\n- If `input_features` is an array-like, then `input_features` must\n match `feature_names_in_` if `feature_names_in_` is defined." @@ -119730,7 +119182,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Get output feature names for transformation.", "docstring": "Get output feature names for transformation.\n\n Parameters\n ----------\n input_features : array-like of str or None, default=None\n Input features.\n\n - If `input_features` is `None`, then `feature_names_in_` is\n used as feature names in. 
If `feature_names_in_` is not defined,\n then the following input feature names are generated:\n `[\"x0\", \"x1\", ..., \"x(n_features_in_ - 1)\"]`.\n - If `input_features` is an array-like, then `input_features` must\n match `feature_names_in_` if `feature_names_in_` is defined.\n\n Returns\n -------\n feature_names_out : ndarray of str objects\n Transformed feature names.\n " }, @@ -119746,7 +119198,7 @@ "qname": "sklearn.impute._base.SimpleImputer.inverse_transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -119759,7 +119211,7 @@ "qname": "sklearn.impute._base.SimpleImputer.inverse_transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features + n_features_missing_indicator)", "description": "The imputed data to be reverted to original data. It has to be\nan augmented array of imputed data and the missing indicator mask." @@ -119771,7 +119223,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Convert the data back to the original representation.\n\nInverts the `transform` operation performed on an array.\nThis operation can only be performed after :class:`SimpleImputer` is\ninstantiated with `add_indicator=True`.\n\nNote that `inverse_transform` can only invert the transform in\nfeatures that have binary indicators for missing values. If a feature\nhas no missing values at `fit` time, the feature won't have a binary\nindicator, and the imputation done at `transform` time won't be\ninverted.\n\n.. versionadded:: 0.24", "docstring": "Convert the data back to the original representation.\n\n Inverts the `transform` operation performed on an array.\n This operation can only be performed after :class:`SimpleImputer` is\n instantiated with `add_indicator=True`.\n\n Note that `inverse_transform` can only invert the transform in\n features that have binary indicators for missing values. If a feature\n has no missing values at `fit` time, the feature won't have a binary\n indicator, and the imputation done at `transform` time won't be\n inverted.\n\n .. versionadded:: 0.24\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features + n_features_missing_indicator)\n The imputed data to be reverted to original data. It has to be\n an augmented array of imputed data and the missing indicator mask.\n\n Returns\n -------\n X_original : ndarray of shape (n_samples, n_features)\n The original `X` with missing values as it was prior\n to imputation.\n " }, @@ -119787,7 +119239,7 @@ "qname": "sklearn.impute._base.SimpleImputer.transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -119800,7 +119252,7 @@ "qname": "sklearn.impute._base.SimpleImputer.transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix}, shape (n_samples, n_features)", "description": "The input data to complete." 
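A hedged sketch of the inverse_transform round-trip described above; it requires add_indicator=True and only restores missing entries for features that had missing values at fit time (toy data assumed):

    import numpy as np
    from sklearn.impute import SimpleImputer

    X = np.array([[1.0, 2.0], [np.nan, 3.0], [7.0, np.nan]])
    imputer = SimpleImputer(strategy="mean", add_indicator=True)
    Xt = imputer.fit_transform(X)          # imputed data augmented with the mask
    X_original = imputer.inverse_transform(Xt)
    print(np.isnan(X_original))            # original NaNs are restored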
@@ -119821,7 +119273,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Impute all missing values in `X`.", "docstring": "Impute all missing values in `X`.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n The input data to complete.\n\n Returns\n -------\n X_imputed : {ndarray, sparse matrix} of shape (n_samples, n_features_out)\n `X` with imputed values.\n " }, @@ -120180,7 +119632,7 @@ "qname": "sklearn.impute._iterative.IterativeImputer.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -120193,7 +119645,7 @@ "qname": "sklearn.impute._iterative.IterativeImputer.__init__.estimator", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "estimator object, default=BayesianRidge()", "description": "The estimator to use at each step of the round-robin imputation.\nIf `sample_posterior=True`, the estimator must support\n`return_std` in its `predict` method." @@ -120209,7 +119661,7 @@ "qname": "sklearn.impute._iterative.IterativeImputer.__init__.missing_values", "default_value": "np.nan", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or np.nan, default=np.nan", "description": "The placeholder for the missing values. All occurrences of\n`missing_values` will be imputed. For pandas' dataframes with\nnullable integer dtypes with missing values, `missing_values`\nshould be set to `np.nan`, since `pd.NA` will be converted to `np.nan`." @@ -120234,7 +119686,7 @@ "qname": "sklearn.impute._iterative.IterativeImputer.__init__.sample_posterior", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Whether to sample from the (Gaussian) predictive posterior of the\nfitted estimator for each imputation. Estimator must support\n`return_std` in its `predict` method if set to `True`. Set to\n`True` if using `IterativeImputer` for multiple imputations." @@ -120250,7 +119702,7 @@ "qname": "sklearn.impute._iterative.IterativeImputer.__init__.max_iter", "default_value": "10", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=10", "description": "Maximum number of imputation rounds to perform before returning the\nimputations computed during the final round. A round is a single\nimputation of each feature with missing values. The stopping criterion\nis met once `max(abs(X_t - X_{t-1}))/max(abs(X[known_vals])) < tol`,\nwhere `X_t` is `X` at iteration `t`. Note that early stopping is only\napplied if `sample_posterior=False`." @@ -120266,7 +119718,7 @@ "qname": "sklearn.impute._iterative.IterativeImputer.__init__.tol", "default_value": "0.001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-3", "description": "Tolerance of the stopping condition." @@ -120282,7 +119734,7 @@ "qname": "sklearn.impute._iterative.IterativeImputer.__init__.n_nearest_features", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Number of other features to use to estimate the missing values of\neach feature column. 
Nearness between features is measured using\nthe absolute correlation coefficient between each feature pair (after\ninitial imputation). To ensure coverage of features throughout the\nimputation process, the neighbor features are not necessarily nearest,\nbut are drawn with probability proportional to correlation for each\nimputed target feature. Can provide significant speed-up when the\nnumber of features is huge. If `None`, all features will be used." @@ -120298,14 +119750,14 @@ "qname": "sklearn.impute._iterative.IterativeImputer.__init__.initial_strategy", "default_value": "'mean'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'mean', 'median', 'most_frequent', 'constant'}, default='mean'", "description": "Which strategy to use to initialize the missing values. Same as the\n`strategy` parameter in :class:`~sklearn.impute.SimpleImputer`." }, "type": { "kind": "EnumType", - "values": ["median", "mean", "most_frequent", "constant"] + "values": ["mean", "constant", "median", "most_frequent"] } }, { @@ -120314,14 +119766,14 @@ "qname": "sklearn.impute._iterative.IterativeImputer.__init__.imputation_order", "default_value": "'ascending'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'ascending', 'descending', 'roman', 'arabic', 'random'}, default='ascending'", "description": "The order in which the features will be imputed. Possible values:\n\n- `'ascending'`: From features with fewest missing values to most.\n- `'descending'`: From features with most missing values to fewest.\n- `'roman'`: Left to right.\n- `'arabic'`: Right to left.\n- `'random'`: A random order for each round." }, "type": { "kind": "EnumType", - "values": ["descending", "ascending", "random", "roman", "arabic"] + "values": ["descending", "random", "arabic", "ascending", "roman"] } }, { @@ -120330,7 +119782,7 @@ "qname": "sklearn.impute._iterative.IterativeImputer.__init__.skip_complete", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "If `True` then features with missing values during :meth:`transform`\nwhich did not have any missing values during :meth:`fit` will be\nimputed with the initial imputation method only. Set to `True` if you\nhave many features with no missing values at both :meth:`fit` and\n:meth:`transform` time to save compute." @@ -120346,7 +119798,7 @@ "qname": "sklearn.impute._iterative.IterativeImputer.__init__.min_value", "default_value": "-np.inf", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float or array-like of shape (n_features,), default=-np.inf", "description": "Minimum possible imputed value. Broadcast to shape `(n_features,)` if\nscalar. If array-like, expects shape `(n_features,)`, one min value for\neach feature. The default is `-np.inf`.\n\n.. versionchanged:: 0.23\n Added support for array-like." @@ -120371,7 +119823,7 @@ "qname": "sklearn.impute._iterative.IterativeImputer.__init__.max_value", "default_value": "np.inf", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float or array-like of shape (n_features,), default=np.inf", "description": "Maximum possible imputed value. Broadcast to shape `(n_features,)` if\nscalar. If array-like, expects shape `(n_features,)`, one max value for\neach feature. The default is `np.inf`.\n\n.. versionchanged:: 0.23\n Added support for array-like." 
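A hedged sketch of the IterativeImputer options documented above; the estimator is exposed behind the experimental flag, so the enable_iterative_imputer import is required (toy data and chosen values are assumptions):

    import numpy as np
    from sklearn.experimental import enable_iterative_imputer  # noqa: F401
    from sklearn.impute import IterativeImputer

    X = np.array([[1.0, 2.0, np.nan],
                  [3.0, np.nan, 6.0],
                  [np.nan, 5.0, 9.0],
                  [7.0, 8.0, 12.0]])

    imputer = IterativeImputer(
        max_iter=10,
        tol=1e-3,
        initial_strategy="mean",       # SimpleImputer strategy for the first pass
        imputation_order="ascending",  # fewest missing values first
        min_value=0.0,                 # clip imputed values from below
        random_state=0,
    )
    Xt = imputer.fit_transform(X)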
@@ -120396,7 +119848,7 @@ "qname": "sklearn.impute._iterative.IterativeImputer.__init__.verbose", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=0", "description": "Verbosity flag, controls the debug messages that are issued\nas functions are evaluated. The higher, the more verbose. Can be 0, 1,\nor 2." @@ -120412,7 +119864,7 @@ "qname": "sklearn.impute._iterative.IterativeImputer.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "The seed of the pseudo random number generator to use. Randomizes\nselection of estimator features if `n_nearest_features` is not `None`,\nthe `imputation_order` if `random`, and the sampling from posterior if\n`sample_posterior=True`. Use an integer for determinism.\nSee :term:`the Glossary `." @@ -120441,7 +119893,7 @@ "qname": "sklearn.impute._iterative.IterativeImputer.__init__.add_indicator", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "If `True`, a :class:`MissingIndicator` transform will stack onto output\nof the imputer's transform. This allows a predictive estimator\nto account for missingness despite imputation. If a feature has no\nmissing values at fit/train time, the feature won't appear on\nthe missing indicator even if there are missing values at\ntransform/test time." @@ -120453,7 +119905,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -120905,7 +120357,7 @@ "qname": "sklearn.impute._iterative.IterativeImputer.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -120918,7 +120370,7 @@ "qname": "sklearn.impute._iterative.IterativeImputer.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like, shape (n_samples, n_features)", "description": "Input data, where `n_samples` is the number of samples and\n`n_features` is the number of features." @@ -120943,7 +120395,7 @@ "qname": "sklearn.impute._iterative.IterativeImputer.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present for API consistency by convention." 
@@ -120955,7 +120407,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the imputer on `X` and return self.", "docstring": "Fit the imputer on `X` and return self.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n Input data, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n self : object\n Fitted estimator.\n " }, @@ -120971,7 +120423,7 @@ "qname": "sklearn.impute._iterative.IterativeImputer.fit_transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -120984,7 +120436,7 @@ "qname": "sklearn.impute._iterative.IterativeImputer.fit_transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like, shape (n_samples, n_features)", "description": "Input data, where `n_samples` is the number of samples and\n`n_features` is the number of features." @@ -121009,7 +120461,7 @@ "qname": "sklearn.impute._iterative.IterativeImputer.fit_transform.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present for API consistency by convention." @@ -121021,7 +120473,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the imputer on `X` and return the transformed `X`.", "docstring": "Fit the imputer on `X` and return the transformed `X`.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n Input data, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n Xt : array-like, shape (n_samples, n_features)\n The imputed input data.\n " }, @@ -121037,7 +120489,7 @@ "qname": "sklearn.impute._iterative.IterativeImputer.get_feature_names_out.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -121050,7 +120502,7 @@ "qname": "sklearn.impute._iterative.IterativeImputer.get_feature_names_out.input_features", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of str or None, default=None", "description": "Input features.\n\n- If `input_features` is `None`, then `feature_names_in_` is\n used as feature names in. If `feature_names_in_` is not defined,\n then the following input feature names are generated:\n `[\"x0\", \"x1\", ..., \"x(n_features_in_ - 1)\"]`.\n- If `input_features` is an array-like, then `input_features` must\n match `feature_names_in_` if `feature_names_in_` is defined." @@ -121071,7 +120523,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Get output feature names for transformation.", "docstring": "Get output feature names for transformation.\n\n Parameters\n ----------\n input_features : array-like of str or None, default=None\n Input features.\n\n - If `input_features` is `None`, then `feature_names_in_` is\n used as feature names in. 
If `feature_names_in_` is not defined,\n then the following input feature names are generated:\n `[\"x0\", \"x1\", ..., \"x(n_features_in_ - 1)\"]`.\n - If `input_features` is an array-like, then `input_features` must\n match `feature_names_in_` if `feature_names_in_` is defined.\n\n Returns\n -------\n feature_names_out : ndarray of str objects\n Transformed feature names.\n " }, @@ -121087,7 +120539,7 @@ "qname": "sklearn.impute._iterative.IterativeImputer.transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -121100,7 +120552,7 @@ "qname": "sklearn.impute._iterative.IterativeImputer.transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "The input data to complete." @@ -121112,7 +120564,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Impute all missing values in `X`.\n\nNote that this is stochastic, and that if `random_state` is not fixed,\nrepeated calls, or permuted input, results will differ.", "docstring": "Impute all missing values in `X`.\n\n Note that this is stochastic, and that if `random_state` is not fixed,\n repeated calls, or permuted input, results will differ.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The input data to complete.\n\n Returns\n -------\n Xt : array-like, shape (n_samples, n_features)\n The imputed input data.\n " }, @@ -121128,7 +120580,7 @@ "qname": "sklearn.impute._knn.KNNImputer.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -121141,7 +120593,7 @@ "qname": "sklearn.impute._knn.KNNImputer.__init__.missing_values", "default_value": "np.nan", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, float, str, np.nan or None, default=np.nan", "description": "The placeholder for the missing values. All occurrences of\n`missing_values` will be imputed. For pandas' dataframes with\nnullable integer dtypes with missing values, `missing_values`\nshould be set to np.nan, since `pd.NA` will be converted to np.nan." @@ -121178,7 +120630,7 @@ "qname": "sklearn.impute._knn.KNNImputer.__init__.n_neighbors", "default_value": "5", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=5", "description": "Number of neighboring samples to use for imputation." @@ -121194,7 +120646,7 @@ "qname": "sklearn.impute._knn.KNNImputer.__init__.weights", "default_value": "'uniform'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'uniform', 'distance'} or callable, default='uniform'", "description": "Weight function used in prediction. Possible values:\n\n- 'uniform' : uniform weights. All points in each neighborhood are\n weighted equally.\n- 'distance' : weight points by the inverse of their distance.\n in this case, closer neighbors of a query point will have a\n greater influence than neighbors which are further away.\n- callable : a user-defined function which accepts an\n array of distances, and returns an array of the same shape\n containing the weights." 
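A hedged sketch of the KNNImputer constructor options documented above (n_neighbors, weights, metric); the toy matrix is an assumption:

    import numpy as np
    from sklearn.impute import KNNImputer

    X = np.array([[1.0, 2.0, np.nan],
                  [3.0, 4.0, 3.0],
                  [np.nan, 6.0, 5.0],
                  [8.0, 8.0, 7.0]])

    imputer = KNNImputer(
        n_neighbors=2,
        weights="distance",      # closer neighbors get larger weights
        metric="nan_euclidean",  # Euclidean distance that ignores missing coordinates
    )
    Xt = imputer.fit_transform(X)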
@@ -121204,7 +120656,7 @@ "types": [ { "kind": "EnumType", - "values": ["distance", "uniform"] + "values": ["uniform", "distance"] }, { "kind": "NamedType", @@ -121219,7 +120671,7 @@ "qname": "sklearn.impute._knn.KNNImputer.__init__.metric", "default_value": "'nan_euclidean'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'nan_euclidean'} or callable, default='nan_euclidean'", "description": "Distance metric for searching neighbors. Possible values:\n\n- 'nan_euclidean'\n- callable : a user-defined function which conforms to the definition\n of ``_pairwise_callable(X, Y, metric, **kwds)``. The function\n accepts two arrays, X and Y, and a `missing_values` keyword in\n `kwds` and returns a scalar distance value." @@ -121244,7 +120696,7 @@ "qname": "sklearn.impute._knn.KNNImputer.__init__.copy", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "If True, a copy of X will be created. If False, imputation will\nbe done in-place whenever possible." @@ -121260,7 +120712,7 @@ "qname": "sklearn.impute._knn.KNNImputer.__init__.add_indicator", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "If True, a :class:`MissingIndicator` transform will stack onto the\noutput of the imputer's transform. This allows a predictive estimator\nto account for missingness despite imputation. If a feature has no\nmissing values at fit/train time, the feature won't appear on the\nmissing indicator even if there are missing values at transform/test\ntime." @@ -121272,7 +120724,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -121377,7 +120829,7 @@ "qname": "sklearn.impute._knn.KNNImputer.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -121390,7 +120842,7 @@ "qname": "sklearn.impute._knn.KNNImputer.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like shape of (n_samples, n_features)", "description": "Input data, where `n_samples` is the number of samples and\n`n_features` is the number of features." @@ -121406,7 +120858,7 @@ "qname": "sklearn.impute._knn.KNNImputer.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present here for API consistency by convention." 
@@ -121418,7 +120870,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the imputer on X.", "docstring": "Fit the imputer on X.\n\n Parameters\n ----------\n X : array-like shape of (n_samples, n_features)\n Input data, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n y : Ignored\n Not used, present here for API consistency by convention.\n\n Returns\n -------\n self : object\n The fitted `KNNImputer` class instance.\n " }, @@ -121434,7 +120886,7 @@ "qname": "sklearn.impute._knn.KNNImputer.get_feature_names_out.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -121447,7 +120899,7 @@ "qname": "sklearn.impute._knn.KNNImputer.get_feature_names_out.input_features", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of str or None, default=None", "description": "Input features.\n\n- If `input_features` is `None`, then `feature_names_in_` is\n used as feature names in. If `feature_names_in_` is not defined,\n then the following input feature names are generated:\n `[\"x0\", \"x1\", ..., \"x(n_features_in_ - 1)\"]`.\n- If `input_features` is an array-like, then `input_features` must\n match `feature_names_in_` if `feature_names_in_` is defined." @@ -121468,7 +120920,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Get output feature names for transformation.", "docstring": "Get output feature names for transformation.\n\n Parameters\n ----------\n input_features : array-like of str or None, default=None\n Input features.\n\n - If `input_features` is `None`, then `feature_names_in_` is\n used as feature names in. If `feature_names_in_` is not defined,\n then the following input feature names are generated:\n `[\"x0\", \"x1\", ..., \"x(n_features_in_ - 1)\"]`.\n - If `input_features` is an array-like, then `input_features` must\n match `feature_names_in_` if `feature_names_in_` is defined.\n\n Returns\n -------\n feature_names_out : ndarray of str objects\n Transformed feature names.\n " }, @@ -121484,7 +120936,7 @@ "qname": "sklearn.impute._knn.KNNImputer.transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -121497,7 +120949,7 @@ "qname": "sklearn.impute._knn.KNNImputer.transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "The input data to complete." @@ -121509,7 +120961,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Impute all missing values in X.", "docstring": "Impute all missing values in X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The input data to complete.\n\n Returns\n -------\n X : array-like of shape (n_samples, n_output_features)\n The imputed dataset. 
`n_output_features` is the number of features\n that is not always missing during `fit`.\n " }, @@ -121820,7 +121272,7 @@ }, "type": { "kind": "EnumType", - "values": ["decision_function", "auto", "predict_proba"] + "values": ["auto", "predict_proba", "decision_function"] } }, { @@ -121868,7 +121320,7 @@ }, "type": { "kind": "EnumType", - "values": ["auto", "brute", "recursion"] + "values": ["auto", "recursion", "brute"] } }, { @@ -121884,7 +121336,7 @@ }, "type": { "kind": "EnumType", - "values": ["both", "average", "individual"] + "values": ["average", "individual", "both"] } } ], @@ -122385,7 +121837,7 @@ "qname": "sklearn.inspection._plot.decision_boundary.DecisionBoundaryDisplay.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -122398,7 +121850,7 @@ "qname": "sklearn.inspection._plot.decision_boundary.DecisionBoundaryDisplay.__init__.xx0", "default_value": null, "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray of shape (grid_resolution, grid_resolution)", "description": "First output of :func:`meshgrid `." @@ -122414,7 +121866,7 @@ "qname": "sklearn.inspection._plot.decision_boundary.DecisionBoundaryDisplay.__init__.xx1", "default_value": null, "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray of shape (grid_resolution, grid_resolution)", "description": "Second output of :func:`meshgrid `." @@ -122430,7 +121882,7 @@ "qname": "sklearn.inspection._plot.decision_boundary.DecisionBoundaryDisplay.__init__.response", "default_value": null, "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray of shape (grid_resolution, grid_resolution)", "description": "Values of the response function." @@ -122446,7 +121898,7 @@ "qname": "sklearn.inspection._plot.decision_boundary.DecisionBoundaryDisplay.__init__.xlabel", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str, default=None", "description": "Default label to place on x axis." @@ -122462,7 +121914,7 @@ "qname": "sklearn.inspection._plot.decision_boundary.DecisionBoundaryDisplay.__init__.ylabel", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str, default=None", "description": "Default label to place on y axis." @@ -122474,7 +121926,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -122490,7 +121942,7 @@ "qname": "sklearn.inspection._plot.decision_boundary.DecisionBoundaryDisplay.from_estimator.cls", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -122503,7 +121955,7 @@ "qname": "sklearn.inspection._plot.decision_boundary.DecisionBoundaryDisplay.from_estimator.estimator", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "object", "description": "Trained estimator used to plot the decision boundary." 
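The `KNNImputer` entries above record the constructor defaults (`n_neighbors=5`, `weights='uniform'`, `metric='nan_euclidean'`, `copy=True`, `add_indicator=False`) together with the `fit`, `transform`, and `get_feature_names_out` signatures. A minimal usage sketch with made-up data, only to show those parameters in context:

```python
import numpy as np
from sklearn.impute import KNNImputer

X = np.array([[1.0, 2.0, np.nan],
              [3.0, 4.0, 3.0],
              [np.nan, 6.0, 5.0],
              [8.0, 8.0, 7.0]])
# n_neighbors=2 overrides the default of 5; all other parameters keep
# the defaults listed in the entries above.
imputer = KNNImputer(n_neighbors=2, weights="uniform")
print(imputer.fit_transform(X))          # missing entries filled from nearest neighbours
print(imputer.get_feature_names_out())   # ["x0", "x1", "x2"] since no feature names were seen in fit
```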
@@ -122519,7 +121971,7 @@ "qname": "sklearn.inspection._plot.decision_boundary.DecisionBoundaryDisplay.from_estimator.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix, dataframe} of shape (n_samples, 2)", "description": "Input data that should be only 2-dimensional." @@ -122544,7 +121996,7 @@ "qname": "sklearn.inspection._plot.decision_boundary.DecisionBoundaryDisplay.from_estimator.grid_resolution", "default_value": "100", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=100", "description": "Number of grid points to use for plotting decision boundary.\nHigher values will make the plot look nicer but be slower to\nrender." @@ -122560,7 +122012,7 @@ "qname": "sklearn.inspection._plot.decision_boundary.DecisionBoundaryDisplay.from_estimator.eps", "default_value": "1.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1.0", "description": "Extends the minimum and maximum values of X for evaluating the\nresponse function." @@ -122576,14 +122028,14 @@ "qname": "sklearn.inspection._plot.decision_boundary.DecisionBoundaryDisplay.from_estimator.plot_method", "default_value": "'contourf'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'contourf', 'contour', 'pcolormesh'}, default='contourf'", "description": "Plotting method to call when plotting the response. Please refer\nto the following matplotlib documentation for details:\n:func:`contourf `,\n:func:`contour `,\n:func:`pcolomesh `." }, "type": { "kind": "EnumType", - "values": ["contour", "contourf", "pcolormesh"] + "values": ["contourf", "contour", "pcolormesh"] } }, { @@ -122592,14 +122044,14 @@ "qname": "sklearn.inspection._plot.decision_boundary.DecisionBoundaryDisplay.from_estimator.response_method", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'auto', 'predict_proba', 'decision_function', 'predict'}, default='auto'", "description": "Specifies whether to use :term:`predict_proba`,\n:term:`decision_function`, :term:`predict` as the target response.\nIf set to 'auto', the response method is tried in the following order:\n:term:`decision_function`, :term:`predict_proba`, :term:`predict`.\nFor multiclass problems, :term:`predict` is selected when\n`response_method=\"auto\"`." }, "type": { "kind": "EnumType", - "values": ["decision_function", "auto", "predict", "predict_proba"] + "values": ["auto", "predict_proba", "predict", "decision_function"] } }, { @@ -122608,7 +122060,7 @@ "qname": "sklearn.inspection._plot.decision_boundary.DecisionBoundaryDisplay.from_estimator.xlabel", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str, default=None", "description": "The label used for the x-axis. If `None`, an attempt is made to\nextract a label from `X` if it is a dataframe, otherwise an empty\nstring is used." @@ -122624,7 +122076,7 @@ "qname": "sklearn.inspection._plot.decision_boundary.DecisionBoundaryDisplay.from_estimator.ylabel", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str, default=None", "description": "The label used for the y-axis. 
If `None`, an attempt is made to\nextract a label from `X` if it is a dataframe, otherwise an empty\nstring is used." @@ -122640,7 +122092,7 @@ "qname": "sklearn.inspection._plot.decision_boundary.DecisionBoundaryDisplay.from_estimator.ax", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "Matplotlib axes, default=None", "description": "Axes object to plot on. If `None`, a new figure and axes is\ncreated." @@ -122652,7 +122104,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Plot decision boundary given an estimator.\n\nRead more in the :ref:`User Guide `.", "docstring": "Plot decision boundary given an estimator.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n estimator : object\n Trained estimator used to plot the decision boundary.\n\n X : {array-like, sparse matrix, dataframe} of shape (n_samples, 2)\n Input data that should be only 2-dimensional.\n\n grid_resolution : int, default=100\n Number of grid points to use for plotting decision boundary.\n Higher values will make the plot look nicer but be slower to\n render.\n\n eps : float, default=1.0\n Extends the minimum and maximum values of X for evaluating the\n response function.\n\n plot_method : {'contourf', 'contour', 'pcolormesh'}, default='contourf'\n Plotting method to call when plotting the response. Please refer\n to the following matplotlib documentation for details:\n :func:`contourf `,\n :func:`contour `,\n :func:`pcolomesh `.\n\n response_method : {'auto', 'predict_proba', 'decision_function', 'predict'}, default='auto'\n Specifies whether to use :term:`predict_proba`,\n :term:`decision_function`, :term:`predict` as the target response.\n If set to 'auto', the response method is tried in the following order:\n :term:`decision_function`, :term:`predict_proba`, :term:`predict`.\n For multiclass problems, :term:`predict` is selected when\n `response_method=\"auto\"`.\n\n xlabel : str, default=None\n The label used for the x-axis. If `None`, an attempt is made to\n extract a label from `X` if it is a dataframe, otherwise an empty\n string is used.\n\n ylabel : str, default=None\n The label used for the y-axis. If `None`, an attempt is made to\n extract a label from `X` if it is a dataframe, otherwise an empty\n string is used.\n\n ax : Matplotlib axes, default=None\n Axes object to plot on. If `None`, a new figure and axes is\n created.\n\n **kwargs : dict\n Additional keyword arguments to be passed to the\n `plot_method`.\n\n Returns\n -------\n display : :class:`~sklearn.inspection.DecisionBoundaryDisplay`\n Object that stores the result.\n\n See Also\n --------\n DecisionBoundaryDisplay : Decision boundary visualization.\n ConfusionMatrixDisplay.from_estimator : Plot the confusion matrix\n given an estimator, the data, and the label.\n ConfusionMatrixDisplay.from_predictions : Plot the confusion matrix\n given the true and predicted labels.\n\n Examples\n --------\n >>> import matplotlib.pyplot as plt\n >>> from sklearn.datasets import load_iris\n >>> from sklearn.linear_model import LogisticRegression\n >>> from sklearn.inspection import DecisionBoundaryDisplay\n >>> iris = load_iris()\n >>> X = iris.data[:, :2]\n >>> classifier = LogisticRegression().fit(X, iris.target)\n >>> disp = DecisionBoundaryDisplay.from_estimator(\n ... classifier, X, response_method=\"predict\",\n ... xlabel=iris.feature_names[0], ylabel=iris.feature_names[1],\n ... alpha=0.5,\n ... 
)\n >>> disp.ax_.scatter(X[:, 0], X[:, 1], c=iris.target, edgecolor=\"k\")\n <...>\n >>> plt.show()\n " }, @@ -122668,7 +122120,7 @@ "qname": "sklearn.inspection._plot.decision_boundary.DecisionBoundaryDisplay.plot.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -122681,14 +122133,14 @@ "qname": "sklearn.inspection._plot.decision_boundary.DecisionBoundaryDisplay.plot.plot_method", "default_value": "'contourf'", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{'contourf', 'contour', 'pcolormesh'}, default='contourf'", "description": "Plotting method to call when plotting the response. Please refer\nto the following matplotlib documentation for details:\n:func:`contourf `,\n:func:`contour `,\n:func:`pcolomesh `." }, "type": { "kind": "EnumType", - "values": ["contour", "contourf", "pcolormesh"] + "values": ["contourf", "contour", "pcolormesh"] } }, { @@ -122697,7 +122149,7 @@ "qname": "sklearn.inspection._plot.decision_boundary.DecisionBoundaryDisplay.plot.ax", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Matplotlib axes, default=None", "description": "Axes object to plot on. If `None`, a new figure and axes is\ncreated." @@ -122713,7 +122165,7 @@ "qname": "sklearn.inspection._plot.decision_boundary.DecisionBoundaryDisplay.plot.xlabel", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "str, default=None", "description": "Overwrite the x-axis label." @@ -122729,7 +122181,7 @@ "qname": "sklearn.inspection._plot.decision_boundary.DecisionBoundaryDisplay.plot.ylabel", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "str, default=None", "description": "Overwrite the y-axis label." @@ -122741,7 +122193,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Plot visualization.", "docstring": "Plot visualization.\n\n Parameters\n ----------\n plot_method : {'contourf', 'contour', 'pcolormesh'}, default='contourf'\n Plotting method to call when plotting the response. Please refer\n to the following matplotlib documentation for details:\n :func:`contourf `,\n :func:`contour `,\n :func:`pcolomesh `.\n\n ax : Matplotlib axes, default=None\n Axes object to plot on. 
If `None`, a new figure and axes is\n created.\n\n xlabel : str, default=None\n Overwrite the x-axis label.\n\n ylabel : str, default=None\n Overwrite the y-axis label.\n\n **kwargs : dict\n Additional keyword arguments to be passed to the `plot_method`.\n\n Returns\n -------\n display: :class:`~sklearn.inspection.DecisionBoundaryDisplay`\n " }, @@ -122780,7 +122232,7 @@ }, "type": { "kind": "EnumType", - "values": ["decision_function", "auto", "predict", "predict_proba"] + "values": ["auto", "predict_proba", "predict", "decision_function"] } } ], @@ -122801,7 +122253,7 @@ "qname": "sklearn.inspection._plot.partial_dependence.PartialDependenceDisplay.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -122814,7 +122266,7 @@ "qname": "sklearn.inspection._plot.partial_dependence.PartialDependenceDisplay.__init__.pd_results", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "list of Bunch", "description": "Results of :func:`~sklearn.inspection.partial_dependence` for\n``features``." @@ -122830,7 +122282,7 @@ "qname": "sklearn.inspection._plot.partial_dependence.PartialDependenceDisplay.__init__.features", "default_value": null, "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "list of (int,) or list of (int, int)", "description": "Indices of features for a given plot. A tuple of one integer will plot\na partial dependence curve of one feature. A tuple of two integers will\nplot a two-way partial dependence curve as a contour plot." @@ -122855,7 +122307,7 @@ "qname": "sklearn.inspection._plot.partial_dependence.PartialDependenceDisplay.__init__.feature_names", "default_value": null, "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "list of str", "description": "Feature names corresponding to the indices in ``features``." @@ -122871,7 +122323,7 @@ "qname": "sklearn.inspection._plot.partial_dependence.PartialDependenceDisplay.__init__.target_idx", "default_value": null, "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int", "description": "- In a multiclass setting, specifies the class for which the PDPs\n should be computed. Note that for binary classification, the\n positive class (index 1) is always used.\n- In a multioutput setting, specifies the task for which the PDPs\n should be computed.\n\nIgnored in binary classification or classical regression settings." @@ -122887,7 +122339,7 @@ "qname": "sklearn.inspection._plot.partial_dependence.PartialDependenceDisplay.__init__.deciles", "default_value": null, "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "dict", "description": "Deciles for feature indices in ``features``." @@ -122903,7 +122355,7 @@ "qname": "sklearn.inspection._plot.partial_dependence.PartialDependenceDisplay.__init__.pdp_lim", "default_value": "'deprecated'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "dict or None", "description": "Global min and max average predictions, such that all plots will have\nthe same scale and y limits. `pdp_lim[1]` is the global min and max for\nsingle partial dependence curves. `pdp_lim[2]` is the global min and\nmax for two-way partial dependence curves. 
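The `DecisionBoundaryDisplay` entries above (now marked public) cover `from_estimator` and `plot`. The docstring's own iris example, lightly condensed here as an illustration of the recorded parameters (requires matplotlib and a scikit-learn version that ships this display):

```python
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.inspection import DecisionBoundaryDisplay
from sklearn.linear_model import LogisticRegression

iris = load_iris()
X = iris.data[:, :2]                      # from_estimator expects 2-D input
clf = LogisticRegression().fit(X, iris.target)
disp = DecisionBoundaryDisplay.from_estimator(
    clf, X,
    response_method="predict",            # one of the enum values recorded above
    plot_method="contourf",               # the default plotting method
    xlabel=iris.feature_names[0], ylabel=iris.feature_names[1],
)
disp.ax_.scatter(X[:, 0], X[:, 1], c=iris.target, edgecolor="k")
plt.show()
```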
If `None`, the limit will be\ninferred from the global minimum and maximum of all predictions.\n\n.. deprecated:: 1.1\n Pass the parameter `pdp_lim` to\n :meth:`~sklearn.inspection.PartialDependenceDisplay.plot` instead.\n It will be removed in 1.3." @@ -122928,7 +122380,7 @@ "qname": "sklearn.inspection._plot.partial_dependence.PartialDependenceDisplay.__init__.kind", "default_value": "'average'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'average', 'individual', 'both'} or list of such str, default='average'", "description": "Whether to plot the partial dependence averaged across all the samples\nin the dataset or one line per sample or both.\n\n- ``kind='average'`` results in the traditional PD plot;\n- ``kind='individual'`` results in the ICE plot;\n- ``kind='both'`` results in plotting both the ICE and PD on the same\n plot.\n\nA list of such strings can be provided to specify `kind` on a per-plot\nbasis. The length of the list should be the same as the number of\ninteraction requested in `features`.\n\n.. note::\n ICE ('individual' or 'both') is not a valid option for 2-ways\n interactions plot. As a result, an error will be raised.\n 2-ways interaction plots should always be configured to\n use the 'average' kind instead.\n\n.. note::\n The fast ``method='recursion'`` option is only available for\n ``kind='average'``. Plotting individual dependencies requires using\n the slower ``method='brute'`` option.\n\n.. versionadded:: 0.24\n Add `kind` parameter with `'average'`, `'individual'`, and `'both'`\n options.\n\n.. versionadded:: 1.1\n Add the possibility to pass a list of string specifying `kind`\n for each plot." @@ -122938,7 +122390,7 @@ "types": [ { "kind": "EnumType", - "values": ["both", "average", "individual"] + "values": ["average", "individual", "both"] }, { "kind": "NamedType", @@ -122953,7 +122405,7 @@ "qname": "sklearn.inspection._plot.partial_dependence.PartialDependenceDisplay.__init__.subsample", "default_value": "1000", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, int or None, default=1000", "description": "Sampling for ICE curves when `kind` is 'individual' or 'both'.\nIf float, should be between 0.0 and 1.0 and represent the proportion\nof the dataset to be used to plot ICE curves. If int, represents the\nmaximum absolute number of samples to use.\n\nNote that the full dataset is still used to calculate partial\ndependence when `kind='both'`.\n\n.. versionadded:: 0.24" @@ -122982,7 +122434,7 @@ "qname": "sklearn.inspection._plot.partial_dependence.PartialDependenceDisplay.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Controls the randomness of the selected samples when subsamples is not\n`None`. See :term:`Glossary ` for details.\n\n.. 
versionadded:: 0.24" @@ -123007,7 +122459,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -123691,7 +123143,7 @@ "qname": "sklearn.inspection._plot.partial_dependence.PartialDependenceDisplay.from_estimator.cls", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -123704,7 +123156,7 @@ "qname": "sklearn.inspection._plot.partial_dependence.PartialDependenceDisplay.from_estimator.estimator", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "BaseEstimator", "description": "A fitted estimator object implementing :term:`predict`,\n:term:`predict_proba`, or :term:`decision_function`.\nMultioutput-multiclass classifiers are not supported." @@ -123720,7 +123172,7 @@ "qname": "sklearn.inspection._plot.partial_dependence.PartialDependenceDisplay.from_estimator.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, dataframe} of shape (n_samples, n_features)", "description": "``X`` is used to generate a grid of values for the target\n``features`` (where the partial dependence will be evaluated), and\nalso to generate values for the complement features when the\n`method` is `'brute'`." @@ -123745,7 +123197,7 @@ "qname": "sklearn.inspection._plot.partial_dependence.PartialDependenceDisplay.from_estimator.features", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "list of {int, str, pair of int, pair of str}", "description": "The target features for which to create the PDPs.\nIf `features[i]` is an integer or a string, a one-way PDP is created;\nif `features[i]` is a tuple, a two-way PDP is created (only supported\nwith `kind='average'`). Each tuple must be of size 2.\nif any entry is a string, then it must be in ``feature_names``." @@ -123770,7 +123222,7 @@ "qname": "sklearn.inspection._plot.partial_dependence.PartialDependenceDisplay.from_estimator.feature_names", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_features,), dtype=str, default=None", "description": "Name of each feature; `feature_names[i]` holds the name of the feature\nwith index `i`.\nBy default, the name of the feature corresponds to their numerical\nindex for NumPy array and their column name for pandas dataframe." @@ -123795,7 +123247,7 @@ "qname": "sklearn.inspection._plot.partial_dependence.PartialDependenceDisplay.from_estimator.target", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "- In a multiclass setting, specifies the class for which the PDPs\n should be computed. Note that for binary classification, the\n positive class (index 1) is always used.\n- In a multioutput setting, specifies the task for which the PDPs\n should be computed.\n\nIgnored in binary classification or classical regression settings." 
@@ -123811,14 +123263,14 @@ "qname": "sklearn.inspection._plot.partial_dependence.PartialDependenceDisplay.from_estimator.response_method", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'auto', 'predict_proba', 'decision_function'}, default='auto'", "description": "Specifies whether to use :term:`predict_proba` or\n:term:`decision_function` as the target response. For regressors\nthis parameter is ignored and the response is always the output of\n:term:`predict`. By default, :term:`predict_proba` is tried first\nand we revert to :term:`decision_function` if it doesn't exist. If\n``method`` is `'recursion'`, the response is always the output of\n:term:`decision_function`." }, "type": { "kind": "EnumType", - "values": ["decision_function", "auto", "predict_proba"] + "values": ["auto", "predict_proba", "decision_function"] } }, { @@ -123827,7 +123279,7 @@ "qname": "sklearn.inspection._plot.partial_dependence.PartialDependenceDisplay.from_estimator.n_cols", "default_value": "3", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=3", "description": "The maximum number of columns in the grid plot. Only active when `ax`\nis a single axis or `None`." @@ -123843,7 +123295,7 @@ "qname": "sklearn.inspection._plot.partial_dependence.PartialDependenceDisplay.from_estimator.grid_resolution", "default_value": "100", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=100", "description": "The number of equally spaced points on the axes of the plots, for each\ntarget feature." @@ -123859,7 +123311,7 @@ "qname": "sklearn.inspection._plot.partial_dependence.PartialDependenceDisplay.from_estimator.percentiles", "default_value": "(0.05, 0.95)", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "tuple of float, default=(0.05, 0.95)", "description": "The lower and upper percentile used to create the extreme values\nfor the PDP axes. Must be in [0, 1]." @@ -123875,7 +123327,7 @@ "qname": "sklearn.inspection._plot.partial_dependence.PartialDependenceDisplay.from_estimator.method", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str, default='auto'", "description": "The method used to calculate the averaged predictions:\n\n- `'recursion'` is only supported for some tree-based estimators\n (namely\n :class:`~sklearn.ensemble.GradientBoostingClassifier`,\n :class:`~sklearn.ensemble.GradientBoostingRegressor`,\n :class:`~sklearn.ensemble.HistGradientBoostingClassifier`,\n :class:`~sklearn.ensemble.HistGradientBoostingRegressor`,\n :class:`~sklearn.tree.DecisionTreeRegressor`,\n :class:`~sklearn.ensemble.RandomForestRegressor`\n but is more efficient in terms of speed.\n With this method, the target response of a\n classifier is always the decision function, not the predicted\n probabilities. Since the `'recursion'` method implicitly computes\n the average of the ICEs by design, it is not compatible with ICE and\n thus `kind` must be `'average'`.\n\n- `'brute'` is supported for any estimator, but is more\n computationally intensive.\n\n- `'auto'`: the `'recursion'` is used for estimators that support it,\n and `'brute'` is used otherwise.\n\nPlease see :ref:`this note ` for\ndifferences between the `'brute'` and `'recursion'` method." 
@@ -123891,7 +123343,7 @@ "qname": "sklearn.inspection._plot.partial_dependence.PartialDependenceDisplay.from_estimator.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "The number of CPUs to use to compute the partial dependences.\nComputation is parallelized over features specified by the `features`\nparameter.\n\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details." @@ -123907,7 +123359,7 @@ "qname": "sklearn.inspection._plot.partial_dependence.PartialDependenceDisplay.from_estimator.verbose", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=0", "description": "Verbose output during PD computations." @@ -123923,7 +123375,7 @@ "qname": "sklearn.inspection._plot.partial_dependence.PartialDependenceDisplay.from_estimator.line_kw", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "dict, default=None", "description": "Dict with keywords passed to the ``matplotlib.pyplot.plot`` call.\nFor one-way partial dependence plots. It can be used to define common\nproperties for both `ice_lines_kw` and `pdp_line_kw`." @@ -123939,7 +123391,7 @@ "qname": "sklearn.inspection._plot.partial_dependence.PartialDependenceDisplay.from_estimator.ice_lines_kw", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "dict, default=None", "description": "Dictionary with keywords passed to the `matplotlib.pyplot.plot` call.\nFor ICE lines in the one-way partial dependence plots.\nThe key value pairs defined in `ice_lines_kw` takes priority over\n`line_kw`." @@ -123955,7 +123407,7 @@ "qname": "sklearn.inspection._plot.partial_dependence.PartialDependenceDisplay.from_estimator.pd_line_kw", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "dict, default=None", "description": "Dictionary with keywords passed to the `matplotlib.pyplot.plot` call.\nFor partial dependence in one-way partial dependence plots.\nThe key value pairs defined in `pd_line_kw` takes priority over\n`line_kw`." @@ -123971,7 +123423,7 @@ "qname": "sklearn.inspection._plot.partial_dependence.PartialDependenceDisplay.from_estimator.contour_kw", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "dict, default=None", "description": "Dict with keywords passed to the ``matplotlib.pyplot.contourf`` call.\nFor two-way partial dependence plots." @@ -123987,7 +123439,7 @@ "qname": "sklearn.inspection._plot.partial_dependence.PartialDependenceDisplay.from_estimator.ax", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "Matplotlib axes or array-like of Matplotlib axes, default=None", "description": "- If a single axis is passed in, it is treated as a bounding axes\n and a grid of partial dependence plots will be drawn within\n these bounds. The `n_cols` parameter controls the number of\n columns in the grid.\n- If an array-like of axes are passed in, the partial dependence\n plots will be drawn directly into these axes.\n- If `None`, a figure and a bounding axes is created and treated\n as the single axes case." 
@@ -124012,14 +123464,14 @@ "qname": "sklearn.inspection._plot.partial_dependence.PartialDependenceDisplay.from_estimator.kind", "default_value": "'average'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'average', 'individual', 'both'}, default='average'", "description": " Whether to plot the partial dependence averaged across all the samples\n in the dataset or one line per sample or both.\n\n - ``kind='average'`` results in the traditional PD plot;\n - ``kind='individual'`` results in the ICE plot.\n\nNote that the fast ``method='recursion'`` option is only available for\n``kind='average'``. Plotting individual dependencies requires using the\nslower ``method='brute'`` option." }, "type": { "kind": "EnumType", - "values": ["both", "average", "individual"] + "values": ["average", "individual", "both"] } }, { @@ -124028,7 +123480,7 @@ "qname": "sklearn.inspection._plot.partial_dependence.PartialDependenceDisplay.from_estimator.centered", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "If `True`, the ICE and PD lines will start at the origin of the\ny-axis. By default, no centering is done.\n\n.. versionadded:: 1.1" @@ -124044,7 +123496,7 @@ "qname": "sklearn.inspection._plot.partial_dependence.PartialDependenceDisplay.from_estimator.subsample", "default_value": "1000", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, int or None, default=1000", "description": "Sampling for ICE curves when `kind` is 'individual' or 'both'.\nIf `float`, should be between 0.0 and 1.0 and represent the proportion\nof the dataset to be used to plot ICE curves. If `int`, represents the\nabsolute number samples to use.\n\nNote that the full dataset is still used to calculate averaged partial\ndependence when `kind='both'`." @@ -124073,7 +123525,7 @@ "qname": "sklearn.inspection._plot.partial_dependence.PartialDependenceDisplay.from_estimator.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Controls the randomness of the selected samples when subsamples is not\n`None` and `kind` is either `'both'` or `'individual'`.\nSee :term:`Glossary ` for details." @@ -124098,7 +123550,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Partial dependence (PD) and individual conditional expectation (ICE) plots.\n\nPartial dependence plots, individual conditional expectation plots or an\noverlay of both of them can be plotted by setting the ``kind``\nparameter. The ``len(features)`` plots are arranged in a grid with\n``n_cols`` columns. Two-way partial dependence plots are plotted as\ncontour plots. The deciles of the feature values will be shown with tick\nmarks on the x-axes for one-way plots, and on both axes for two-way\nplots.\n\nRead more in the :ref:`User Guide `.\n\n.. note::\n\n :func:`PartialDependenceDisplay.from_estimator` does not support using the\n same axes with multiple calls. 
To plot the partial dependence for\n multiple estimators, please pass the axes created by the first call to the\n second call::\n\n >>> from sklearn.inspection import PartialDependenceDisplay\n >>> from sklearn.datasets import make_friedman1\n >>> from sklearn.linear_model import LinearRegression\n >>> from sklearn.ensemble import RandomForestRegressor\n >>> X, y = make_friedman1()\n >>> est1 = LinearRegression().fit(X, y)\n >>> est2 = RandomForestRegressor().fit(X, y)\n >>> disp1 = PartialDependenceDisplay.from_estimator(est1, X,\n ... [1, 2])\n >>> disp2 = PartialDependenceDisplay.from_estimator(est2, X, [1, 2],\n ... ax=disp1.axes_)\n\n.. warning::\n\n For :class:`~sklearn.ensemble.GradientBoostingClassifier` and\n :class:`~sklearn.ensemble.GradientBoostingRegressor`, the\n `'recursion'` method (used by default) will not account for the `init`\n predictor of the boosting process. In practice, this will produce\n the same values as `'brute'` up to a constant offset in the target\n response, provided that `init` is a constant estimator (which is the\n default). However, if `init` is not a constant estimator, the\n partial dependence values are incorrect for `'recursion'` because the\n offset will be sample-dependent. It is preferable to use the `'brute'`\n method. Note that this only applies to\n :class:`~sklearn.ensemble.GradientBoostingClassifier` and\n :class:`~sklearn.ensemble.GradientBoostingRegressor`, not to\n :class:`~sklearn.ensemble.HistGradientBoostingClassifier` and\n :class:`~sklearn.ensemble.HistGradientBoostingRegressor`.\n\n.. versionadded:: 1.0", "docstring": "Partial dependence (PD) and individual conditional expectation (ICE) plots.\n\n Partial dependence plots, individual conditional expectation plots or an\n overlay of both of them can be plotted by setting the ``kind``\n parameter. The ``len(features)`` plots are arranged in a grid with\n ``n_cols`` columns. Two-way partial dependence plots are plotted as\n contour plots. The deciles of the feature values will be shown with tick\n marks on the x-axes for one-way plots, and on both axes for two-way\n plots.\n\n Read more in the :ref:`User Guide `.\n\n .. note::\n\n :func:`PartialDependenceDisplay.from_estimator` does not support using the\n same axes with multiple calls. To plot the partial dependence for\n multiple estimators, please pass the axes created by the first call to the\n second call::\n\n >>> from sklearn.inspection import PartialDependenceDisplay\n >>> from sklearn.datasets import make_friedman1\n >>> from sklearn.linear_model import LinearRegression\n >>> from sklearn.ensemble import RandomForestRegressor\n >>> X, y = make_friedman1()\n >>> est1 = LinearRegression().fit(X, y)\n >>> est2 = RandomForestRegressor().fit(X, y)\n >>> disp1 = PartialDependenceDisplay.from_estimator(est1, X,\n ... [1, 2])\n >>> disp2 = PartialDependenceDisplay.from_estimator(est2, X, [1, 2],\n ... ax=disp1.axes_)\n\n .. warning::\n\n For :class:`~sklearn.ensemble.GradientBoostingClassifier` and\n :class:`~sklearn.ensemble.GradientBoostingRegressor`, the\n `'recursion'` method (used by default) will not account for the `init`\n predictor of the boosting process. In practice, this will produce\n the same values as `'brute'` up to a constant offset in the target\n response, provided that `init` is a constant estimator (which is the\n default). However, if `init` is not a constant estimator, the\n partial dependence values are incorrect for `'recursion'` because the\n offset will be sample-dependent. 
It is preferable to use the `'brute'`\n method. Note that this only applies to\n :class:`~sklearn.ensemble.GradientBoostingClassifier` and\n :class:`~sklearn.ensemble.GradientBoostingRegressor`, not to\n :class:`~sklearn.ensemble.HistGradientBoostingClassifier` and\n :class:`~sklearn.ensemble.HistGradientBoostingRegressor`.\n\n .. versionadded:: 1.0\n\n Parameters\n ----------\n estimator : BaseEstimator\n A fitted estimator object implementing :term:`predict`,\n :term:`predict_proba`, or :term:`decision_function`.\n Multioutput-multiclass classifiers are not supported.\n\n X : {array-like, dataframe} of shape (n_samples, n_features)\n ``X`` is used to generate a grid of values for the target\n ``features`` (where the partial dependence will be evaluated), and\n also to generate values for the complement features when the\n `method` is `'brute'`.\n\n features : list of {int, str, pair of int, pair of str}\n The target features for which to create the PDPs.\n If `features[i]` is an integer or a string, a one-way PDP is created;\n if `features[i]` is a tuple, a two-way PDP is created (only supported\n with `kind='average'`). Each tuple must be of size 2.\n if any entry is a string, then it must be in ``feature_names``.\n\n feature_names : array-like of shape (n_features,), dtype=str, default=None\n Name of each feature; `feature_names[i]` holds the name of the feature\n with index `i`.\n By default, the name of the feature corresponds to their numerical\n index for NumPy array and their column name for pandas dataframe.\n\n target : int, default=None\n - In a multiclass setting, specifies the class for which the PDPs\n should be computed. Note that for binary classification, the\n positive class (index 1) is always used.\n - In a multioutput setting, specifies the task for which the PDPs\n should be computed.\n\n Ignored in binary classification or classical regression settings.\n\n response_method : {'auto', 'predict_proba', 'decision_function'}, default='auto'\n Specifies whether to use :term:`predict_proba` or\n :term:`decision_function` as the target response. For regressors\n this parameter is ignored and the response is always the output of\n :term:`predict`. By default, :term:`predict_proba` is tried first\n and we revert to :term:`decision_function` if it doesn't exist. If\n ``method`` is `'recursion'`, the response is always the output of\n :term:`decision_function`.\n\n n_cols : int, default=3\n The maximum number of columns in the grid plot. Only active when `ax`\n is a single axis or `None`.\n\n grid_resolution : int, default=100\n The number of equally spaced points on the axes of the plots, for each\n target feature.\n\n percentiles : tuple of float, default=(0.05, 0.95)\n The lower and upper percentile used to create the extreme values\n for the PDP axes. Must be in [0, 1].\n\n method : str, default='auto'\n The method used to calculate the averaged predictions:\n\n - `'recursion'` is only supported for some tree-based estimators\n (namely\n :class:`~sklearn.ensemble.GradientBoostingClassifier`,\n :class:`~sklearn.ensemble.GradientBoostingRegressor`,\n :class:`~sklearn.ensemble.HistGradientBoostingClassifier`,\n :class:`~sklearn.ensemble.HistGradientBoostingRegressor`,\n :class:`~sklearn.tree.DecisionTreeRegressor`,\n :class:`~sklearn.ensemble.RandomForestRegressor`\n but is more efficient in terms of speed.\n With this method, the target response of a\n classifier is always the decision function, not the predicted\n probabilities. 
Since the `'recursion'` method implicitly computes\n the average of the ICEs by design, it is not compatible with ICE and\n thus `kind` must be `'average'`.\n\n - `'brute'` is supported for any estimator, but is more\n computationally intensive.\n\n - `'auto'`: the `'recursion'` is used for estimators that support it,\n and `'brute'` is used otherwise.\n\n Please see :ref:`this note ` for\n differences between the `'brute'` and `'recursion'` method.\n\n n_jobs : int, default=None\n The number of CPUs to use to compute the partial dependences.\n Computation is parallelized over features specified by the `features`\n parameter.\n\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See :term:`Glossary `\n for more details.\n\n verbose : int, default=0\n Verbose output during PD computations.\n\n line_kw : dict, default=None\n Dict with keywords passed to the ``matplotlib.pyplot.plot`` call.\n For one-way partial dependence plots. It can be used to define common\n properties for both `ice_lines_kw` and `pdp_line_kw`.\n\n ice_lines_kw : dict, default=None\n Dictionary with keywords passed to the `matplotlib.pyplot.plot` call.\n For ICE lines in the one-way partial dependence plots.\n The key value pairs defined in `ice_lines_kw` takes priority over\n `line_kw`.\n\n pd_line_kw : dict, default=None\n Dictionary with keywords passed to the `matplotlib.pyplot.plot` call.\n For partial dependence in one-way partial dependence plots.\n The key value pairs defined in `pd_line_kw` takes priority over\n `line_kw`.\n\n contour_kw : dict, default=None\n Dict with keywords passed to the ``matplotlib.pyplot.contourf`` call.\n For two-way partial dependence plots.\n\n ax : Matplotlib axes or array-like of Matplotlib axes, default=None\n - If a single axis is passed in, it is treated as a bounding axes\n and a grid of partial dependence plots will be drawn within\n these bounds. The `n_cols` parameter controls the number of\n columns in the grid.\n - If an array-like of axes are passed in, the partial dependence\n plots will be drawn directly into these axes.\n - If `None`, a figure and a bounding axes is created and treated\n as the single axes case.\n\n kind : {'average', 'individual', 'both'}, default='average'\n Whether to plot the partial dependence averaged across all the samples\n in the dataset or one line per sample or both.\n\n - ``kind='average'`` results in the traditional PD plot;\n - ``kind='individual'`` results in the ICE plot.\n\n Note that the fast ``method='recursion'`` option is only available for\n ``kind='average'``. Plotting individual dependencies requires using the\n slower ``method='brute'`` option.\n\n centered : bool, default=False\n If `True`, the ICE and PD lines will start at the origin of the\n y-axis. By default, no centering is done.\n\n .. versionadded:: 1.1\n\n subsample : float, int or None, default=1000\n Sampling for ICE curves when `kind` is 'individual' or 'both'.\n If `float`, should be between 0.0 and 1.0 and represent the proportion\n of the dataset to be used to plot ICE curves. 
If `int`, represents the\n absolute number samples to use.\n\n Note that the full dataset is still used to calculate averaged partial\n dependence when `kind='both'`.\n\n random_state : int, RandomState instance or None, default=None\n Controls the randomness of the selected samples when subsamples is not\n `None` and `kind` is either `'both'` or `'individual'`.\n See :term:`Glossary ` for details.\n\n Returns\n -------\n display : :class:`~sklearn.inspection.PartialDependenceDisplay`\n\n See Also\n --------\n partial_dependence : Compute Partial Dependence values.\n\n Examples\n --------\n >>> import matplotlib.pyplot as plt\n >>> from sklearn.datasets import make_friedman1\n >>> from sklearn.ensemble import GradientBoostingRegressor\n >>> from sklearn.inspection import PartialDependenceDisplay\n >>> X, y = make_friedman1()\n >>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)\n >>> PartialDependenceDisplay.from_estimator(clf, X, [0, (0, 1)])\n <...>\n >>> plt.show()\n " }, @@ -124114,7 +123566,7 @@ "qname": "sklearn.inspection._plot.partial_dependence.PartialDependenceDisplay.plot.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -124127,7 +123579,7 @@ "qname": "sklearn.inspection._plot.partial_dependence.PartialDependenceDisplay.plot.ax", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "Matplotlib axes or array-like of Matplotlib axes, default=None", "description": "- If a single axis is passed in, it is treated as a bounding axes\n and a grid of partial dependence plots will be drawn within\n these bounds. The `n_cols` parameter controls the number of\n columns in the grid.\n- If an array-like of axes are passed in, the partial dependence\n plots will be drawn directly into these axes.\n- If `None`, a figure and a bounding axes is created and treated\n as the single axes case." @@ -124152,7 +123604,7 @@ "qname": "sklearn.inspection._plot.partial_dependence.PartialDependenceDisplay.plot.n_cols", "default_value": "3", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=3", "description": "The maximum number of columns in the grid plot. Only active when\n`ax` is a single axes or `None`." @@ -124168,7 +123620,7 @@ "qname": "sklearn.inspection._plot.partial_dependence.PartialDependenceDisplay.plot.line_kw", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "dict, default=None", "description": "Dict with keywords passed to the `matplotlib.pyplot.plot` call.\nFor one-way partial dependence plots." @@ -124184,7 +123636,7 @@ "qname": "sklearn.inspection._plot.partial_dependence.PartialDependenceDisplay.plot.ice_lines_kw", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "dict, default=None", "description": "Dictionary with keywords passed to the `matplotlib.pyplot.plot` call.\nFor ICE lines in the one-way partial dependence plots.\nThe key value pairs defined in `ice_lines_kw` takes priority over\n`line_kw`.\n\n.. 
versionadded:: 1.0" @@ -124200,7 +123652,7 @@ "qname": "sklearn.inspection._plot.partial_dependence.PartialDependenceDisplay.plot.pd_line_kw", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "dict, default=None", "description": "Dictionary with keywords passed to the `matplotlib.pyplot.plot` call.\nFor partial dependence in one-way partial dependence plots.\nThe key value pairs defined in `pd_line_kw` takes priority over\n`line_kw`.\n\n.. versionadded:: 1.0" @@ -124216,7 +123668,7 @@ "qname": "sklearn.inspection._plot.partial_dependence.PartialDependenceDisplay.plot.contour_kw", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "dict, default=None", "description": "Dict with keywords passed to the `matplotlib.pyplot.contourf`\ncall for two-way partial dependence plots." @@ -124232,7 +123684,7 @@ "qname": "sklearn.inspection._plot.partial_dependence.PartialDependenceDisplay.plot.pdp_lim", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "dict, default=None", "description": "Global min and max average predictions, such that all plots will have the\nsame scale and y limits. `pdp_lim[1]` is the global min and max for single\npartial dependence curves. `pdp_lim[2]` is the global min and max for\ntwo-way partial dependence curves. If `None` (default), the limit will be\ninferred from the global minimum and maximum of all predictions.\n\n.. versionadded:: 1.1" @@ -124248,7 +123700,7 @@ "qname": "sklearn.inspection._plot.partial_dependence.PartialDependenceDisplay.plot.centered", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "If `True`, the ICE and PD lines will start at the origin of the\ny-axis. By default, no centering is done.\n\n.. versionadded:: 1.1" @@ -124260,7 +123712,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Plot partial dependence plots.", "docstring": "Plot partial dependence plots.\n\n Parameters\n ----------\n ax : Matplotlib axes or array-like of Matplotlib axes, default=None\n - If a single axis is passed in, it is treated as a bounding axes\n and a grid of partial dependence plots will be drawn within\n these bounds. The `n_cols` parameter controls the number of\n columns in the grid.\n - If an array-like of axes are passed in, the partial dependence\n plots will be drawn directly into these axes.\n - If `None`, a figure and a bounding axes is created and treated\n as the single axes case.\n\n n_cols : int, default=3\n The maximum number of columns in the grid plot. Only active when\n `ax` is a single axes or `None`.\n\n line_kw : dict, default=None\n Dict with keywords passed to the `matplotlib.pyplot.plot` call.\n For one-way partial dependence plots.\n\n ice_lines_kw : dict, default=None\n Dictionary with keywords passed to the `matplotlib.pyplot.plot` call.\n For ICE lines in the one-way partial dependence plots.\n The key value pairs defined in `ice_lines_kw` takes priority over\n `line_kw`.\n\n .. versionadded:: 1.0\n\n pd_line_kw : dict, default=None\n Dictionary with keywords passed to the `matplotlib.pyplot.plot` call.\n For partial dependence in one-way partial dependence plots.\n The key value pairs defined in `pd_line_kw` takes priority over\n `line_kw`.\n\n .. 
versionadded:: 1.0\n\n contour_kw : dict, default=None\n Dict with keywords passed to the `matplotlib.pyplot.contourf`\n call for two-way partial dependence plots.\n\n pdp_lim : dict, default=None\n Global min and max average predictions, such that all plots will have the\n same scale and y limits. `pdp_lim[1]` is the global min and max for single\n partial dependence curves. `pdp_lim[2]` is the global min and max for\n two-way partial dependence curves. If `None` (default), the limit will be\n inferred from the global minimum and maximum of all predictions.\n\n .. versionadded:: 1.1\n\n centered : bool, default=False\n If `True`, the ICE and PD lines will start at the origin of the\n y-axis. By default, no centering is done.\n\n .. versionadded:: 1.1\n\n Returns\n -------\n display : :class:`~sklearn.inspection.PartialDependenceDisplay`\n " }, @@ -124677,7 +124129,7 @@ }, "type": { "kind": "EnumType", - "values": ["decision_function", "auto", "predict_proba"] + "values": ["auto", "predict_proba", "decision_function"] } }, { @@ -124881,7 +124333,7 @@ "types": [ { "kind": "EnumType", - "values": ["both", "average", "individual"] + "values": ["average", "individual", "both"] }, { "kind": "NamedType", @@ -127765,7 +127217,7 @@ "qname": "sklearn.linear_model._base.LinearRegression.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -127778,7 +127230,7 @@ "qname": "sklearn.linear_model._base.LinearRegression.__init__.fit_intercept", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether to calculate the intercept for this model. If set\nto False, no intercept will be used in calculations\n(i.e. data is expected to be centered)." @@ -127794,7 +127246,7 @@ "qname": "sklearn.linear_model._base.LinearRegression.__init__.normalize", "default_value": "'deprecated'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "This parameter is ignored when ``fit_intercept`` is set to False.\nIf True, the regressors X will be normalized before regression by\nsubtracting the mean and dividing by the l2-norm.\nIf you wish to standardize, please use\n:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``\non an estimator with ``normalize=False``.\n\n.. deprecated:: 1.0\n `normalize` was deprecated in version 1.0 and will be\n removed in 1.2." @@ -127810,7 +127262,7 @@ "qname": "sklearn.linear_model._base.LinearRegression.__init__.copy_X", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "If True, X will be copied; else, it may be overwritten." @@ -127826,7 +127278,7 @@ "qname": "sklearn.linear_model._base.LinearRegression.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "The number of jobs to use for the computation. This will only provide\nspeedup in case of sufficiently large problems, that is if firstly\n`n_targets > 1` and secondly `X` is sparse or if `positive` is set\nto `True`. ``None`` means 1 unless in a\n:obj:`joblib.parallel_backend` context. ``-1`` means using all\nprocessors. See :term:`Glossary ` for more details." 
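The `PartialDependenceDisplay.from_estimator` docstring above already carries a Friedman-1 example; a condensed sketch of it, adding the `kind`, `subsample`, and `random_state` parameters these entries record (small `n_estimators` purely to keep it cheap to run):

```python
import matplotlib.pyplot as plt
from sklearn.datasets import make_friedman1
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.inspection import PartialDependenceDisplay

X, y = make_friedman1(random_state=0)
est = GradientBoostingRegressor(n_estimators=10).fit(X, y)
# kind="both" overlays ICE curves and the averaged PD line; ICE is only
# valid for one-way plots, so only single-feature indices are passed.
PartialDependenceDisplay.from_estimator(
    est, X, features=[0, 1], kind="both", subsample=50, random_state=0
)
plt.show()
```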
@@ -127842,7 +127294,7 @@ "qname": "sklearn.linear_model._base.LinearRegression.__init__.positive", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "When set to ``True``, forces the coefficients to be positive. This\noption is only supported for dense arrays.\n\n.. versionadded:: 0.24" @@ -127854,7 +127306,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -127870,7 +127322,7 @@ "qname": "sklearn.linear_model._base.LinearRegression.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -127883,7 +127335,7 @@ "qname": "sklearn.linear_model._base.LinearRegression.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Training data." @@ -127908,7 +127360,7 @@ "qname": "sklearn.linear_model._base.LinearRegression.fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,) or (n_samples, n_targets)", "description": "Target values. Will be cast to X's dtype if necessary." @@ -127924,7 +127376,7 @@ "qname": "sklearn.linear_model._base.LinearRegression.fit.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,), default=None", "description": "Individual weights for each sample.\n\n.. versionadded:: 0.17\n parameter *sample_weight* support to LinearRegression." @@ -127936,7 +127388,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit linear model.", "docstring": "\n Fit linear model.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training data.\n\n y : array-like of shape (n_samples,) or (n_samples, n_targets)\n Target values. Will be cast to X's dtype if necessary.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Individual weights for each sample.\n\n .. versionadded:: 0.17\n parameter *sample_weight* support to LinearRegression.\n\n Returns\n -------\n self : object\n Fitted Estimator.\n " }, @@ -128560,7 +128012,7 @@ "qname": "sklearn.linear_model._bayes.ARDRegression.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -128573,7 +128025,7 @@ "qname": "sklearn.linear_model._bayes.ARDRegression.__init__.n_iter", "default_value": "300", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=300", "description": "Maximum number of iterations." @@ -128589,7 +128041,7 @@ "qname": "sklearn.linear_model._bayes.ARDRegression.__init__.tol", "default_value": "0.001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-3", "description": "Stop the algorithm if w has converged." 
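The `LinearRegression` entries above list `fit_intercept`, the deprecated `normalize`, `copy_X`, `n_jobs`, and `positive`, plus `fit(X, y, sample_weight=None)`. A minimal fit on toy numbers (invented for illustration only):

```python
import numpy as np
from sklearn.linear_model import LinearRegression

X = np.array([[1.0], [2.0], [3.0], [4.0]])
y = np.array([2.0, 4.1, 5.9, 8.2])
# Defaults recorded above: fit_intercept=True, copy_X=True, positive=False.
reg = LinearRegression().fit(X, y, sample_weight=None)
print(reg.coef_, reg.intercept_)
```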
@@ -128605,7 +128057,7 @@ "qname": "sklearn.linear_model._bayes.ARDRegression.__init__.alpha_1", "default_value": "1e-06", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-6", "description": "Hyper-parameter : shape parameter for the Gamma distribution prior\nover the alpha parameter." @@ -128621,7 +128073,7 @@ "qname": "sklearn.linear_model._bayes.ARDRegression.__init__.alpha_2", "default_value": "1e-06", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-6", "description": "Hyper-parameter : inverse scale parameter (rate parameter) for the\nGamma distribution prior over the alpha parameter." @@ -128637,7 +128089,7 @@ "qname": "sklearn.linear_model._bayes.ARDRegression.__init__.lambda_1", "default_value": "1e-06", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-6", "description": "Hyper-parameter : shape parameter for the Gamma distribution prior\nover the lambda parameter." @@ -128653,7 +128105,7 @@ "qname": "sklearn.linear_model._bayes.ARDRegression.__init__.lambda_2", "default_value": "1e-06", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-6", "description": "Hyper-parameter : inverse scale parameter (rate parameter) for the\nGamma distribution prior over the lambda parameter." @@ -128669,7 +128121,7 @@ "qname": "sklearn.linear_model._bayes.ARDRegression.__init__.compute_score", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "If True, compute the objective function at each step of the model." @@ -128685,7 +128137,7 @@ "qname": "sklearn.linear_model._bayes.ARDRegression.__init__.threshold_lambda", "default_value": "10000.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=10 000", "description": "Threshold for removing (pruning) weights with high precision from\nthe computation." @@ -128701,7 +128153,7 @@ "qname": "sklearn.linear_model._bayes.ARDRegression.__init__.fit_intercept", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether to calculate the intercept for this model. If set\nto false, no intercept will be used in calculations\n(i.e. data is expected to be centered)." @@ -128717,7 +128169,7 @@ "qname": "sklearn.linear_model._bayes.ARDRegression.__init__.normalize", "default_value": "'deprecated'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "This parameter is ignored when ``fit_intercept`` is set to False.\nIf True, the regressors X will be normalized before regression by\nsubtracting the mean and dividing by the l2-norm.\nIf you wish to standardize, please use\n:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``\non an estimator with ``normalize=False``.\n\n.. deprecated:: 1.0\n ``normalize`` was deprecated in version 1.0 and will be removed in\n 1.2." 
@@ -128733,7 +128185,7 @@ "qname": "sklearn.linear_model._bayes.ARDRegression.__init__.copy_X", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "If True, X will be copied; else, it may be overwritten." @@ -128749,7 +128201,7 @@ "qname": "sklearn.linear_model._bayes.ARDRegression.__init__.verbose", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Verbose mode when fitting the model." @@ -128761,7 +128213,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -128931,7 +128383,7 @@ "qname": "sklearn.linear_model._bayes.ARDRegression.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -128944,7 +128396,7 @@ "qname": "sklearn.linear_model._bayes.ARDRegression.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Training vector, where `n_samples` is the number of samples and\n`n_features` is the number of features." @@ -128960,7 +128412,7 @@ "qname": "sklearn.linear_model._bayes.ARDRegression.fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "Target values (integers). Will be cast to X's dtype if necessary." @@ -128972,7 +128424,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the model according to the given training data and parameters.\n\nIterative procedure to maximize the evidence", "docstring": "Fit the model according to the given training data and parameters.\n\n Iterative procedure to maximize the evidence\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training vector, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n y : array-like of shape (n_samples,)\n Target values (integers). Will be cast to X's dtype if necessary.\n\n Returns\n -------\n self : object\n Fitted estimator.\n " }, @@ -128988,7 +128440,7 @@ "qname": "sklearn.linear_model._bayes.ARDRegression.predict.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -129001,7 +128453,7 @@ "qname": "sklearn.linear_model._bayes.ARDRegression.predict.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Samples." @@ -129026,7 +128478,7 @@ "qname": "sklearn.linear_model._bayes.ARDRegression.predict.return_std", "default_value": "False", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Whether to return the standard deviation of posterior prediction." 
@@ -129038,7 +128490,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict using the linear model.\n\nIn addition to the mean of the predictive distribution, also its\nstandard deviation can be returned.", "docstring": "Predict using the linear model.\n\n In addition to the mean of the predictive distribution, also its\n standard deviation can be returned.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Samples.\n\n return_std : bool, default=False\n Whether to return the standard deviation of posterior prediction.\n\n Returns\n -------\n y_mean : array-like of shape (n_samples,)\n Mean of predictive distribution of query points.\n\n y_std : array-like of shape (n_samples,)\n Standard deviation of predictive distribution of query points.\n " }, @@ -129054,7 +128506,7 @@ "qname": "sklearn.linear_model._bayes.BayesianRidge.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -129067,7 +128519,7 @@ "qname": "sklearn.linear_model._bayes.BayesianRidge.__init__.n_iter", "default_value": "300", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=300", "description": "Maximum number of iterations. Should be greater than or equal to 1." @@ -129083,7 +128535,7 @@ "qname": "sklearn.linear_model._bayes.BayesianRidge.__init__.tol", "default_value": "0.001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-3", "description": "Stop the algorithm if w has converged." @@ -129099,7 +128551,7 @@ "qname": "sklearn.linear_model._bayes.BayesianRidge.__init__.alpha_1", "default_value": "1e-06", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-6", "description": "Hyper-parameter : shape parameter for the Gamma distribution prior\nover the alpha parameter." @@ -129115,7 +128567,7 @@ "qname": "sklearn.linear_model._bayes.BayesianRidge.__init__.alpha_2", "default_value": "1e-06", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-6", "description": "Hyper-parameter : inverse scale parameter (rate parameter) for the\nGamma distribution prior over the alpha parameter." @@ -129131,7 +128583,7 @@ "qname": "sklearn.linear_model._bayes.BayesianRidge.__init__.lambda_1", "default_value": "1e-06", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-6", "description": "Hyper-parameter : shape parameter for the Gamma distribution prior\nover the lambda parameter." @@ -129147,7 +128599,7 @@ "qname": "sklearn.linear_model._bayes.BayesianRidge.__init__.lambda_2", "default_value": "1e-06", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-6", "description": "Hyper-parameter : inverse scale parameter (rate parameter) for the\nGamma distribution prior over the lambda parameter." @@ -129163,7 +128615,7 @@ "qname": "sklearn.linear_model._bayes.BayesianRidge.__init__.alpha_init", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=None", "description": "Initial value for alpha (precision of the noise).\nIf not set, alpha_init is 1/Var(y).\n\n .. 
versionadded:: 0.22" @@ -129179,7 +128631,7 @@ "qname": "sklearn.linear_model._bayes.BayesianRidge.__init__.lambda_init", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=None", "description": "Initial value for lambda (precision of the weights).\nIf not set, lambda_init is 1.\n\n .. versionadded:: 0.22" @@ -129195,7 +128647,7 @@ "qname": "sklearn.linear_model._bayes.BayesianRidge.__init__.compute_score", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "If True, compute the log marginal likelihood at each iteration of the\noptimization." @@ -129211,7 +128663,7 @@ "qname": "sklearn.linear_model._bayes.BayesianRidge.__init__.fit_intercept", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether to calculate the intercept for this model.\nThe intercept is not treated as a probabilistic parameter\nand thus has no associated variance. If set\nto False, no intercept will be used in calculations\n(i.e. data is expected to be centered)." @@ -129227,7 +128679,7 @@ "qname": "sklearn.linear_model._bayes.BayesianRidge.__init__.normalize", "default_value": "'deprecated'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "This parameter is ignored when ``fit_intercept`` is set to False.\nIf True, the regressors X will be normalized before regression by\nsubtracting the mean and dividing by the l2-norm.\nIf you wish to standardize, please use\n:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``\non an estimator with ``normalize=False``.\n\n.. deprecated:: 1.0\n ``normalize`` was deprecated in version 1.0 and will be removed in\n 1.2." @@ -129243,7 +128695,7 @@ "qname": "sklearn.linear_model._bayes.BayesianRidge.__init__.copy_X", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "If True, X will be copied; else, it may be overwritten." @@ -129259,7 +128711,7 @@ "qname": "sklearn.linear_model._bayes.BayesianRidge.__init__.verbose", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Verbose mode when fitting the model." @@ -129271,35 +128723,10 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, - { - "id": "sklearn/sklearn.linear_model._bayes/BayesianRidge/_check_params", - "name": "_check_params", - "qname": "sklearn.linear_model._bayes.BayesianRidge._check_params", - "decorators": [], - "parameters": [ - { - "id": "sklearn/sklearn.linear_model._bayes/BayesianRidge/_check_params/self", - "name": "self", - "qname": "sklearn.linear_model._bayes.BayesianRidge._check_params.self", - "default_value": null, - "assigned_by": "IMPLICIT", - "is_public": false, - "docstring": { - "type": "", - "description": "" - }, - "type": {} - } - ], - "results": [], - "is_public": false, - "description": "Check validity of parameters and raise ValueError\nor TypeError if not valid.", - "docstring": "Check validity of parameters and raise ValueError\n or TypeError if not valid." 
- }, { "id": "sklearn/sklearn.linear_model._bayes/BayesianRidge/_log_marginal_likelihood", "name": "_log_marginal_likelihood", @@ -129583,7 +129010,7 @@ "qname": "sklearn.linear_model._bayes.BayesianRidge.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -129596,7 +129023,7 @@ "qname": "sklearn.linear_model._bayes.BayesianRidge.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray of shape (n_samples, n_features)", "description": "Training data." @@ -129612,7 +129039,7 @@ "qname": "sklearn.linear_model._bayes.BayesianRidge.fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray of shape (n_samples,)", "description": "Target values. Will be cast to X's dtype if necessary." @@ -129628,7 +129055,7 @@ "qname": "sklearn.linear_model._bayes.BayesianRidge.fit.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray of shape (n_samples,), default=None", "description": "Individual weights for each sample.\n\n.. versionadded:: 0.20\n parameter *sample_weight* support to BayesianRidge." @@ -129640,7 +129067,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the model.", "docstring": "Fit the model.\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Training data.\n y : ndarray of shape (n_samples,)\n Target values. Will be cast to X's dtype if necessary.\n\n sample_weight : ndarray of shape (n_samples,), default=None\n Individual weights for each sample.\n\n .. versionadded:: 0.20\n parameter *sample_weight* support to BayesianRidge.\n\n Returns\n -------\n self : object\n Returns the instance itself.\n " }, @@ -129656,7 +129083,7 @@ "qname": "sklearn.linear_model._bayes.BayesianRidge.predict.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -129669,7 +129096,7 @@ "qname": "sklearn.linear_model._bayes.BayesianRidge.predict.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Samples." @@ -129694,7 +129121,7 @@ "qname": "sklearn.linear_model._bayes.BayesianRidge.predict.return_std", "default_value": "False", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Whether to return the standard deviation of posterior prediction." 
@@ -129706,7 +129133,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict using the linear model.\n\nIn addition to the mean of the predictive distribution, also its\nstandard deviation can be returned.", "docstring": "Predict using the linear model.\n\n In addition to the mean of the predictive distribution, also its\n standard deviation can be returned.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Samples.\n\n return_std : bool, default=False\n Whether to return the standard deviation of posterior prediction.\n\n Returns\n -------\n y_mean : array-like of shape (n_samples,)\n Mean of predictive distribution of query points.\n\n y_std : array-like of shape (n_samples,)\n Standard deviation of predictive distribution of query points.\n " }, @@ -129722,7 +129149,7 @@ "qname": "sklearn.linear_model._coordinate_descent.ElasticNet.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -129735,7 +129162,7 @@ "qname": "sklearn.linear_model._coordinate_descent.ElasticNet.__init__.alpha", "default_value": "1.0", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1.0", "description": "Constant that multiplies the penalty terms. Defaults to 1.0.\nSee the notes for the exact mathematical meaning of this\nparameter. ``alpha = 0`` is equivalent to an ordinary least square,\nsolved by the :class:`LinearRegression` object. For numerical\nreasons, using ``alpha = 0`` with the ``Lasso`` object is not advised.\nGiven this, you should use the :class:`LinearRegression` object." @@ -129751,7 +129178,7 @@ "qname": "sklearn.linear_model._coordinate_descent.ElasticNet.__init__.l1_ratio", "default_value": "0.5", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.5", "description": "The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For\n``l1_ratio = 0`` the penalty is an L2 penalty. ``For l1_ratio = 1`` it\nis an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a\ncombination of L1 and L2." @@ -129767,7 +129194,7 @@ "qname": "sklearn.linear_model._coordinate_descent.ElasticNet.__init__.fit_intercept", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether the intercept should be estimated or not. If ``False``, the\ndata is assumed to be already centered." @@ -129783,7 +129210,7 @@ "qname": "sklearn.linear_model._coordinate_descent.ElasticNet.__init__.normalize", "default_value": "'deprecated'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "This parameter is ignored when ``fit_intercept`` is set to False.\nIf True, the regressors X will be normalized before regression by\nsubtracting the mean and dividing by the l2-norm.\nIf you wish to standardize, please use\n:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``\non an estimator with ``normalize=False``.\n\n.. deprecated:: 1.0\n ``normalize`` was deprecated in version 1.0 and will be removed in\n 1.2." 
@@ -129799,7 +129226,7 @@ "qname": "sklearn.linear_model._coordinate_descent.ElasticNet.__init__.precompute", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool or array-like of shape (n_features, n_features), default=False", "description": "Whether to use a precomputed Gram matrix to speed up\ncalculations. The Gram matrix can also be passed as argument.\nFor sparse input this option is always ``False`` to preserve sparsity." @@ -129824,7 +129251,7 @@ "qname": "sklearn.linear_model._coordinate_descent.ElasticNet.__init__.max_iter", "default_value": "1000", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=1000", "description": "The maximum number of iterations." @@ -129840,7 +129267,7 @@ "qname": "sklearn.linear_model._coordinate_descent.ElasticNet.__init__.copy_X", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "If ``True``, X will be copied; else, it may be overwritten." @@ -129856,7 +129283,7 @@ "qname": "sklearn.linear_model._coordinate_descent.ElasticNet.__init__.tol", "default_value": "0.0001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-4", "description": "The tolerance for the optimization: if the updates are\nsmaller than ``tol``, the optimization code checks the\ndual gap for optimality and continues until it is smaller\nthan ``tol``, see Notes below." @@ -129872,7 +129299,7 @@ "qname": "sklearn.linear_model._coordinate_descent.ElasticNet.__init__.warm_start", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "When set to ``True``, reuse the solution of the previous call to fit as\ninitialization, otherwise, just erase the previous solution.\nSee :term:`the Glossary `." @@ -129888,7 +129315,7 @@ "qname": "sklearn.linear_model._coordinate_descent.ElasticNet.__init__.positive", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "When set to ``True``, forces the coefficients to be positive." @@ -129904,7 +129331,7 @@ "qname": "sklearn.linear_model._coordinate_descent.ElasticNet.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance, default=None", "description": "The seed of the pseudo random number generator that selects a random\nfeature to update. Used when ``selection`` == 'random'.\nPass an int for reproducible output across multiple function calls.\nSee :term:`Glossary `." @@ -129929,7 +129356,7 @@ "qname": "sklearn.linear_model._coordinate_descent.ElasticNet.__init__.selection", "default_value": "'cyclic'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'cyclic', 'random'}, default='cyclic'", "description": "If set to 'random', a random coefficient is updated every iteration\nrather than looping over features sequentially by default. This\n(setting to 'random') often leads to significantly faster convergence\nespecially when tol is higher than 1e-4." 
@@ -129941,7 +129368,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -130007,7 +129434,7 @@ "qname": "sklearn.linear_model._coordinate_descent.ElasticNet.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -130020,7 +129447,7 @@ "qname": "sklearn.linear_model._coordinate_descent.ElasticNet.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{ndarray, sparse matrix} of (n_samples, n_features)", "description": "Data." @@ -130045,7 +129472,7 @@ "qname": "sklearn.linear_model._coordinate_descent.ElasticNet.fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{ndarray, sparse matrix} of shape (n_samples,) or (n_samples, n_targets)", "description": "Target. Will be cast to X's dtype if necessary." @@ -130070,7 +129497,7 @@ "qname": "sklearn.linear_model._coordinate_descent.ElasticNet.fit.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "float or array-like of shape (n_samples,), default=None", "description": "Sample weights. Internally, the `sample_weight` vector will be\nrescaled to sum to `n_samples`.\n\n.. versionadded:: 0.23" @@ -130095,7 +129522,7 @@ "qname": "sklearn.linear_model._coordinate_descent.ElasticNet.fit.check_input", "default_value": "True", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Allow to bypass several input checking.\nDon't use this parameter unless you know what you do." @@ -130107,7 +129534,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit model with coordinate descent.", "docstring": "Fit model with coordinate descent.\n\n Parameters\n ----------\n X : {ndarray, sparse matrix} of (n_samples, n_features)\n Data.\n\n y : {ndarray, sparse matrix} of shape (n_samples,) or (n_samples, n_targets)\n Target. Will be cast to X's dtype if necessary.\n\n sample_weight : float or array-like of shape (n_samples,), default=None\n Sample weights. Internally, the `sample_weight` vector will be\n rescaled to sum to `n_samples`.\n\n .. versionadded:: 0.23\n\n check_input : bool, default=True\n Allow to bypass several input checking.\n Don't use this parameter unless you know what you do.\n\n Returns\n -------\n self : object\n Fitted estimator.\n\n Notes\n -----\n Coordinate descent is an algorithm that considers each column of\n data at a time hence it will automatically convert the X input\n as a Fortran-contiguous numpy array if necessary.\n\n To avoid memory re-allocation it is advised to allocate the\n initial data in memory directly using that format.\n " }, @@ -130123,7 +129550,7 @@ "qname": "sklearn.linear_model._coordinate_descent.ElasticNet.sparse_coef_.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -130132,7 +129559,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Sparse representation of the fitted `coef_`.", "docstring": "Sparse representation of the fitted `coef_`." 
}, @@ -130148,7 +129575,7 @@ "qname": "sklearn.linear_model._coordinate_descent.ElasticNetCV.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -130161,7 +129588,7 @@ "qname": "sklearn.linear_model._coordinate_descent.ElasticNetCV.__init__.l1_ratio", "default_value": "0.5", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float or list of float, default=0.5", "description": "Float between 0 and 1 passed to ElasticNet (scaling between\nl1 and l2 penalties). For ``l1_ratio = 0``\nthe penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.\nFor ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2\nThis parameter can be a list, in which case the different\nvalues are tested by cross-validation and the one giving the best\nprediction score is used. Note that a good choice of list of\nvalues for l1_ratio is often to put more values close to 1\n(i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,\n.9, .95, .99, 1]``." @@ -130186,7 +129613,7 @@ "qname": "sklearn.linear_model._coordinate_descent.ElasticNetCV.__init__.eps", "default_value": "0.001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-3", "description": "Length of the path. ``eps=1e-3`` means that\n``alpha_min / alpha_max = 1e-3``." @@ -130202,7 +129629,7 @@ "qname": "sklearn.linear_model._coordinate_descent.ElasticNetCV.__init__.n_alphas", "default_value": "100", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=100", "description": "Number of alphas along the regularization path, used for each l1_ratio." @@ -130218,7 +129645,7 @@ "qname": "sklearn.linear_model._coordinate_descent.ElasticNetCV.__init__.alphas", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray, default=None", "description": "List of alphas where to compute the models.\nIf None alphas are set automatically." @@ -130234,7 +129661,7 @@ "qname": "sklearn.linear_model._coordinate_descent.ElasticNetCV.__init__.fit_intercept", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether to calculate the intercept for this model. If set\nto false, no intercept will be used in calculations\n(i.e. data is expected to be centered)." @@ -130250,7 +129677,7 @@ "qname": "sklearn.linear_model._coordinate_descent.ElasticNetCV.__init__.normalize", "default_value": "'deprecated'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "This parameter is ignored when ``fit_intercept`` is set to False.\nIf True, the regressors X will be normalized before regression by\nsubtracting the mean and dividing by the l2-norm.\nIf you wish to standardize, please use\n:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``\non an estimator with ``normalize=False``.\n\n.. deprecated:: 1.0\n ``normalize`` was deprecated in version 1.0 and will be removed in\n 1.2." 
@@ -130266,7 +129693,7 @@ "qname": "sklearn.linear_model._coordinate_descent.ElasticNetCV.__init__.precompute", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "'auto', bool or array-like of shape (n_features, n_features), default='auto'", "description": "Whether to use a precomputed Gram matrix to speed up\ncalculations. If set to ``'auto'`` let us decide. The Gram\nmatrix can also be passed as argument." @@ -130295,7 +129722,7 @@ "qname": "sklearn.linear_model._coordinate_descent.ElasticNetCV.__init__.max_iter", "default_value": "1000", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=1000", "description": "The maximum number of iterations." @@ -130311,7 +129738,7 @@ "qname": "sklearn.linear_model._coordinate_descent.ElasticNetCV.__init__.tol", "default_value": "0.0001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-4", "description": "The tolerance for the optimization: if the updates are\nsmaller than ``tol``, the optimization code checks the\ndual gap for optimality and continues until it is smaller\nthan ``tol``." @@ -130327,7 +129754,7 @@ "qname": "sklearn.linear_model._coordinate_descent.ElasticNetCV.__init__.cv", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, cross-validation generator or iterable, default=None", "description": "Determines the cross-validation splitting strategy.\nPossible inputs for cv are:\n\n- None, to use the default 5-fold cross-validation,\n- int, to specify the number of folds.\n- :term:`CV splitter`,\n- An iterable yielding (train, test) splits as arrays of indices.\n\nFor int/None inputs, :class:`KFold` is used.\n\nRefer :ref:`User Guide ` for the various\ncross-validation strategies that can be used here.\n\n.. versionchanged:: 0.22\n ``cv`` default value if None changed from 3-fold to 5-fold." @@ -130356,7 +129783,7 @@ "qname": "sklearn.linear_model._coordinate_descent.ElasticNetCV.__init__.copy_X", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "If ``True``, X will be copied; else, it may be overwritten." @@ -130372,7 +129799,7 @@ "qname": "sklearn.linear_model._coordinate_descent.ElasticNetCV.__init__.verbose", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool or int, default=0", "description": "Amount of verbosity." @@ -130397,7 +129824,7 @@ "qname": "sklearn.linear_model._coordinate_descent.ElasticNetCV.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Number of CPUs to use during the cross validation.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details." @@ -130413,7 +129840,7 @@ "qname": "sklearn.linear_model._coordinate_descent.ElasticNetCV.__init__.positive", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "When set to ``True``, forces the coefficients to be positive." 
@@ -130429,7 +129856,7 @@ "qname": "sklearn.linear_model._coordinate_descent.ElasticNetCV.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance, default=None", "description": "The seed of the pseudo random number generator that selects a random\nfeature to update. Used when ``selection`` == 'random'.\nPass an int for reproducible output across multiple function calls.\nSee :term:`Glossary `." @@ -130454,7 +129881,7 @@ "qname": "sklearn.linear_model._coordinate_descent.ElasticNetCV.__init__.selection", "default_value": "'cyclic'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'cyclic', 'random'}, default='cyclic'", "description": "If set to 'random', a random coefficient is updated every iteration\nrather than looping over features sequentially by default. This\n(setting to 'random') often leads to significantly faster convergence\nespecially when tol is higher than 1e-4." @@ -130466,7 +129893,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -130557,7 +129984,7 @@ "qname": "sklearn.linear_model._coordinate_descent.Lasso.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -130570,7 +129997,7 @@ "qname": "sklearn.linear_model._coordinate_descent.Lasso.__init__.alpha", "default_value": "1.0", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1.0", "description": "Constant that multiplies the L1 term, controlling regularization\nstrength. `alpha` must be a non-negative float i.e. in `[0, inf)`.\n\nWhen `alpha = 0`, the objective is equivalent to ordinary least\nsquares, solved by the :class:`LinearRegression` object. For numerical\nreasons, using `alpha = 0` with the `Lasso` object is not advised.\nInstead, you should use the :class:`LinearRegression` object." @@ -130586,7 +130013,7 @@ "qname": "sklearn.linear_model._coordinate_descent.Lasso.__init__.fit_intercept", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether to calculate the intercept for this model. If set\nto False, no intercept will be used in calculations\n(i.e. data is expected to be centered)." @@ -130602,7 +130029,7 @@ "qname": "sklearn.linear_model._coordinate_descent.Lasso.__init__.normalize", "default_value": "'deprecated'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "This parameter is ignored when ``fit_intercept`` is set to False.\nIf True, the regressors X will be normalized before regression by\nsubtracting the mean and dividing by the l2-norm.\nIf you wish to standardize, please use\n:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``\non an estimator with ``normalize=False``.\n\n.. deprecated:: 1.0\n ``normalize`` was deprecated in version 1.0 and will be removed in\n 1.2." 
@@ -130618,7 +130045,7 @@ "qname": "sklearn.linear_model._coordinate_descent.Lasso.__init__.precompute", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool or array-like of shape (n_features, n_features), default=False", "description": "Whether to use a precomputed Gram matrix to speed up\ncalculations. The Gram matrix can also be passed as argument.\nFor sparse input this option is always ``False`` to preserve sparsity." @@ -130643,7 +130070,7 @@ "qname": "sklearn.linear_model._coordinate_descent.Lasso.__init__.copy_X", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "If ``True``, X will be copied; else, it may be overwritten." @@ -130659,7 +130086,7 @@ "qname": "sklearn.linear_model._coordinate_descent.Lasso.__init__.max_iter", "default_value": "1000", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=1000", "description": "The maximum number of iterations." @@ -130675,7 +130102,7 @@ "qname": "sklearn.linear_model._coordinate_descent.Lasso.__init__.tol", "default_value": "0.0001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-4", "description": "The tolerance for the optimization: if the updates are\nsmaller than ``tol``, the optimization code checks the\ndual gap for optimality and continues until it is smaller\nthan ``tol``, see Notes below." @@ -130691,7 +130118,7 @@ "qname": "sklearn.linear_model._coordinate_descent.Lasso.__init__.warm_start", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "When set to True, reuse the solution of the previous call to fit as\ninitialization, otherwise, just erase the previous solution.\nSee :term:`the Glossary `." @@ -130707,7 +130134,7 @@ "qname": "sklearn.linear_model._coordinate_descent.Lasso.__init__.positive", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "When set to ``True``, forces the coefficients to be positive." @@ -130723,7 +130150,7 @@ "qname": "sklearn.linear_model._coordinate_descent.Lasso.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance, default=None", "description": "The seed of the pseudo random number generator that selects a random\nfeature to update. Used when ``selection`` == 'random'.\nPass an int for reproducible output across multiple function calls.\nSee :term:`Glossary `." @@ -130748,7 +130175,7 @@ "qname": "sklearn.linear_model._coordinate_descent.Lasso.__init__.selection", "default_value": "'cyclic'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'cyclic', 'random'}, default='cyclic'", "description": "If set to 'random', a random coefficient is updated every iteration\nrather than looping over features sequentially by default. This\n(setting to 'random') often leads to significantly faster convergence\nespecially when tol is higher than 1e-4." 
@@ -130760,7 +130187,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -130776,7 +130203,7 @@ "qname": "sklearn.linear_model._coordinate_descent.LassoCV.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -130789,7 +130216,7 @@ "qname": "sklearn.linear_model._coordinate_descent.LassoCV.__init__.eps", "default_value": "0.001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-3", "description": "Length of the path. ``eps=1e-3`` means that\n``alpha_min / alpha_max = 1e-3``." @@ -130805,7 +130232,7 @@ "qname": "sklearn.linear_model._coordinate_descent.LassoCV.__init__.n_alphas", "default_value": "100", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=100", "description": "Number of alphas along the regularization path." @@ -130821,7 +130248,7 @@ "qname": "sklearn.linear_model._coordinate_descent.LassoCV.__init__.alphas", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray, default=None", "description": "List of alphas where to compute the models.\nIf ``None`` alphas are set automatically." @@ -130837,7 +130264,7 @@ "qname": "sklearn.linear_model._coordinate_descent.LassoCV.__init__.fit_intercept", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether to calculate the intercept for this model. If set\nto false, no intercept will be used in calculations\n(i.e. data is expected to be centered)." @@ -130853,7 +130280,7 @@ "qname": "sklearn.linear_model._coordinate_descent.LassoCV.__init__.normalize", "default_value": "'deprecated'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "This parameter is ignored when ``fit_intercept`` is set to False.\nIf True, the regressors X will be normalized before regression by\nsubtracting the mean and dividing by the l2-norm.\nIf you wish to standardize, please use\n:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``\non an estimator with ``normalize=False``.\n\n.. deprecated:: 1.0\n ``normalize`` was deprecated in version 1.0 and will be removed in\n 1.2." @@ -130869,7 +130296,7 @@ "qname": "sklearn.linear_model._coordinate_descent.LassoCV.__init__.precompute", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "'auto', bool or array-like of shape (n_features, n_features), default='auto'", "description": "Whether to use a precomputed Gram matrix to speed up\ncalculations. If set to ``'auto'`` let us decide. The Gram\nmatrix can also be passed as argument." @@ -130898,7 +130325,7 @@ "qname": "sklearn.linear_model._coordinate_descent.LassoCV.__init__.max_iter", "default_value": "1000", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=1000", "description": "The maximum number of iterations." 
@@ -130914,7 +130341,7 @@ "qname": "sklearn.linear_model._coordinate_descent.LassoCV.__init__.tol", "default_value": "0.0001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-4", "description": "The tolerance for the optimization: if the updates are\nsmaller than ``tol``, the optimization code checks the\ndual gap for optimality and continues until it is smaller\nthan ``tol``." @@ -130930,7 +130357,7 @@ "qname": "sklearn.linear_model._coordinate_descent.LassoCV.__init__.copy_X", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "If ``True``, X will be copied; else, it may be overwritten." @@ -130946,7 +130373,7 @@ "qname": "sklearn.linear_model._coordinate_descent.LassoCV.__init__.cv", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, cross-validation generator or iterable, default=None", "description": "Determines the cross-validation splitting strategy.\nPossible inputs for cv are:\n\n- None, to use the default 5-fold cross-validation,\n- int, to specify the number of folds.\n- :term:`CV splitter`,\n- An iterable yielding (train, test) splits as arrays of indices.\n\nFor int/None inputs, :class:`KFold` is used.\n\nRefer :ref:`User Guide ` for the various\ncross-validation strategies that can be used here.\n\n.. versionchanged:: 0.22\n ``cv`` default value if None changed from 3-fold to 5-fold." @@ -130975,7 +130402,7 @@ "qname": "sklearn.linear_model._coordinate_descent.LassoCV.__init__.verbose", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool or int, default=False", "description": "Amount of verbosity." @@ -131000,7 +130427,7 @@ "qname": "sklearn.linear_model._coordinate_descent.LassoCV.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Number of CPUs to use during the cross validation.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details." @@ -131016,7 +130443,7 @@ "qname": "sklearn.linear_model._coordinate_descent.LassoCV.__init__.positive", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "If positive, restrict regression coefficients to be positive." @@ -131032,7 +130459,7 @@ "qname": "sklearn.linear_model._coordinate_descent.LassoCV.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance, default=None", "description": "The seed of the pseudo random number generator that selects a random\nfeature to update. Used when ``selection`` == 'random'.\nPass an int for reproducible output across multiple function calls.\nSee :term:`Glossary `." 
@@ -131057,7 +130484,7 @@ "qname": "sklearn.linear_model._coordinate_descent.LassoCV.__init__.selection", "default_value": "'cyclic'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'cyclic', 'random'}, default='cyclic'", "description": "If set to 'random', a random coefficient is updated every iteration\nrather than looping over features sequentially by default. This\n(setting to 'random') often leads to significantly faster convergence\nespecially when tol is higher than 1e-4." @@ -131069,7 +130496,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -131584,7 +131011,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskElasticNet.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -131597,7 +131024,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskElasticNet.__init__.alpha", "default_value": "1.0", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1.0", "description": "Constant that multiplies the L1/L2 term. Defaults to 1.0." @@ -131613,7 +131040,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskElasticNet.__init__.l1_ratio", "default_value": "0.5", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.5", "description": "The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.\nFor l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it\nis an L2 penalty.\nFor ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2." @@ -131629,7 +131056,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskElasticNet.__init__.fit_intercept", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether to calculate the intercept for this model. If set\nto false, no intercept will be used in calculations\n(i.e. data is expected to be centered)." @@ -131645,7 +131072,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskElasticNet.__init__.normalize", "default_value": "'deprecated'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "This parameter is ignored when ``fit_intercept`` is set to False.\nIf True, the regressors X will be normalized before regression by\nsubtracting the mean and dividing by the l2-norm.\nIf you wish to standardize, please use\n:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``\non an estimator with ``normalize=False``.\n\n.. deprecated:: 1.0\n ``normalize`` was deprecated in version 1.0 and will be removed in\n 1.2." @@ -131661,7 +131088,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskElasticNet.__init__.copy_X", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "If ``True``, X will be copied; else, it may be overwritten." @@ -131677,7 +131104,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskElasticNet.__init__.max_iter", "default_value": "1000", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=1000", "description": "The maximum number of iterations." 
@@ -131693,7 +131120,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskElasticNet.__init__.tol", "default_value": "0.0001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-4", "description": "The tolerance for the optimization: if the updates are\nsmaller than ``tol``, the optimization code checks the\ndual gap for optimality and continues until it is smaller\nthan ``tol``." @@ -131709,7 +131136,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskElasticNet.__init__.warm_start", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "When set to ``True``, reuse the solution of the previous call to fit as\ninitialization, otherwise, just erase the previous solution.\nSee :term:`the Glossary `." @@ -131725,7 +131152,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskElasticNet.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance, default=None", "description": "The seed of the pseudo random number generator that selects a random\nfeature to update. Used when ``selection`` == 'random'.\nPass an int for reproducible output across multiple function calls.\nSee :term:`Glossary `." @@ -131750,7 +131177,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskElasticNet.__init__.selection", "default_value": "'cyclic'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'cyclic', 'random'}, default='cyclic'", "description": "If set to 'random', a random coefficient is updated every iteration\nrather than looping over features sequentially by default. This\n(setting to 'random') often leads to significantly faster convergence\nespecially when tol is higher than 1e-4." @@ -131762,7 +131189,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -131803,7 +131230,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskElasticNet.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -131816,7 +131243,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskElasticNet.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray of shape (n_samples, n_features)", "description": "Data." @@ -131832,7 +131259,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskElasticNet.fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray of shape (n_samples, n_targets)", "description": "Target. Will be cast to X's dtype if necessary." @@ -131844,7 +131271,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit MultiTaskElasticNet model with coordinate descent.", "docstring": "Fit MultiTaskElasticNet model with coordinate descent.\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Data.\n y : ndarray of shape (n_samples, n_targets)\n Target. 
Will be cast to X's dtype if necessary.\n\n Returns\n -------\n self : object\n Fitted estimator.\n\n Notes\n -----\n Coordinate descent is an algorithm that considers each column of\n data at a time hence it will automatically convert the X input\n as a Fortran-contiguous numpy array if necessary.\n\n To avoid memory re-allocation it is advised to allocate the\n initial data in memory directly using that format.\n " }, @@ -131860,7 +131287,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskElasticNetCV.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -131873,7 +131300,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskElasticNetCV.__init__.l1_ratio", "default_value": "0.5", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float or list of float, default=0.5", "description": "The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.\nFor l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it\nis an L2 penalty.\nFor ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.\nThis parameter can be a list, in which case the different\nvalues are tested by cross-validation and the one giving the best\nprediction score is used. Note that a good choice of list of\nvalues for l1_ratio is often to put more values close to 1\n(i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,\n.9, .95, .99, 1]``." @@ -131898,7 +131325,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskElasticNetCV.__init__.eps", "default_value": "0.001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-3", "description": "Length of the path. ``eps=1e-3`` means that\n``alpha_min / alpha_max = 1e-3``." @@ -131914,7 +131341,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskElasticNetCV.__init__.n_alphas", "default_value": "100", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=100", "description": "Number of alphas along the regularization path." @@ -131930,7 +131357,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskElasticNetCV.__init__.alphas", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like, default=None", "description": "List of alphas where to compute the models.\nIf not provided, set automatically." @@ -131946,7 +131373,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskElasticNetCV.__init__.fit_intercept", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether to calculate the intercept for this model. If set\nto false, no intercept will be used in calculations\n(i.e. data is expected to be centered)." 
@@ -131962,7 +131389,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskElasticNetCV.__init__.normalize", "default_value": "'deprecated'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "This parameter is ignored when ``fit_intercept`` is set to False.\nIf True, the regressors X will be normalized before regression by\nsubtracting the mean and dividing by the l2-norm.\nIf you wish to standardize, please use\n:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``\non an estimator with ``normalize=False``.\n\n.. deprecated:: 1.0\n ``normalize`` was deprecated in version 1.0 and will be removed in\n 1.2." @@ -131978,7 +131405,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskElasticNetCV.__init__.max_iter", "default_value": "1000", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=1000", "description": "The maximum number of iterations." @@ -131994,7 +131421,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskElasticNetCV.__init__.tol", "default_value": "0.0001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-4", "description": "The tolerance for the optimization: if the updates are\nsmaller than ``tol``, the optimization code checks the\ndual gap for optimality and continues until it is smaller\nthan ``tol``." @@ -132010,7 +131437,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskElasticNetCV.__init__.cv", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, cross-validation generator or iterable, default=None", "description": "Determines the cross-validation splitting strategy.\nPossible inputs for cv are:\n\n- None, to use the default 5-fold cross-validation,\n- int, to specify the number of folds.\n- :term:`CV splitter`,\n- An iterable yielding (train, test) splits as arrays of indices.\n\nFor int/None inputs, :class:`KFold` is used.\n\nRefer :ref:`User Guide ` for the various\ncross-validation strategies that can be used here.\n\n.. versionchanged:: 0.22\n ``cv`` default value if None changed from 3-fold to 5-fold." @@ -132039,7 +131466,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskElasticNetCV.__init__.copy_X", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "If ``True``, X will be copied; else, it may be overwritten." @@ -132055,7 +131482,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskElasticNetCV.__init__.verbose", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool or int, default=0", "description": "Amount of verbosity." @@ -132080,7 +131507,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskElasticNetCV.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Number of CPUs to use during the cross validation. Note that this is\nused only if multiple values for l1_ratio are given.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details." 
@@ -132096,7 +131523,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskElasticNetCV.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance, default=None", "description": "The seed of the pseudo random number generator that selects a random\nfeature to update. Used when ``selection`` == 'random'.\nPass an int for reproducible output across multiple function calls.\nSee :term:`Glossary `." @@ -132121,7 +131548,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskElasticNetCV.__init__.selection", "default_value": "'cyclic'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'cyclic', 'random'}, default='cyclic'", "description": "If set to 'random', a random coefficient is updated every iteration\nrather than looping over features sequentially by default. This\n(setting to 'random') often leads to significantly faster convergence\nespecially when tol is higher than 1e-4." @@ -132133,7 +131560,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -132224,7 +131651,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskElasticNetCV.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -132237,7 +131664,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskElasticNetCV.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray of shape (n_samples, n_features)", "description": "Training data." @@ -132253,7 +131680,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskElasticNetCV.fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray of shape (n_samples, n_targets)", "description": "Training target variable. Will be cast to X's dtype if necessary." @@ -132265,7 +131692,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit MultiTaskElasticNet model with coordinate descent.\n\nFit is on grid of alphas and best alpha estimated by cross-validation.", "docstring": "Fit MultiTaskElasticNet model with coordinate descent.\n\n Fit is on grid of alphas and best alpha estimated by cross-validation.\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Training data.\n y : ndarray of shape (n_samples, n_targets)\n Training target variable. Will be cast to X's dtype if necessary.\n\n Returns\n -------\n self : object\n Returns MultiTaskElasticNet instance.\n " }, @@ -132281,7 +131708,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskLasso.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -132294,7 +131721,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskLasso.__init__.alpha", "default_value": "1.0", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1.0", "description": "Constant that multiplies the L1/L2 term. Defaults to 1.0." 
@@ -132310,7 +131737,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskLasso.__init__.fit_intercept", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether to calculate the intercept for this model. If set\nto false, no intercept will be used in calculations\n(i.e. data is expected to be centered)." @@ -132326,7 +131753,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskLasso.__init__.normalize", "default_value": "'deprecated'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "This parameter is ignored when ``fit_intercept`` is set to False.\nIf True, the regressors X will be normalized before regression by\nsubtracting the mean and dividing by the l2-norm.\nIf you wish to standardize, please use\n:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``\non an estimator with ``normalize=False``.\n\n.. deprecated:: 1.0\n ``normalize`` was deprecated in version 1.0 and will be removed in\n 1.2." @@ -132342,7 +131769,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskLasso.__init__.copy_X", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "If ``True``, X will be copied; else, it may be overwritten." @@ -132358,7 +131785,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskLasso.__init__.max_iter", "default_value": "1000", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=1000", "description": "The maximum number of iterations." @@ -132374,7 +131801,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskLasso.__init__.tol", "default_value": "0.0001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-4", "description": "The tolerance for the optimization: if the updates are\nsmaller than ``tol``, the optimization code checks the\ndual gap for optimality and continues until it is smaller\nthan ``tol``." @@ -132390,7 +131817,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskLasso.__init__.warm_start", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "When set to ``True``, reuse the solution of the previous call to fit as\ninitialization, otherwise, just erase the previous solution.\nSee :term:`the Glossary `." @@ -132406,7 +131833,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskLasso.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance, default=None", "description": "The seed of the pseudo random number generator that selects a random\nfeature to update. Used when ``selection`` == 'random'.\nPass an int for reproducible output across multiple function calls.\nSee :term:`Glossary `." 
@@ -132431,7 +131858,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskLasso.__init__.selection", "default_value": "'cyclic'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'cyclic', 'random'}, default='cyclic'", "description": "If set to 'random', a random coefficient is updated every iteration\nrather than looping over features sequentially by default. This\n(setting to 'random') often leads to significantly faster convergence\nespecially when tol is higher than 1e-4." @@ -132443,7 +131870,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -132459,7 +131886,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskLassoCV.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -132472,7 +131899,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskLassoCV.__init__.eps", "default_value": "0.001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-3", "description": "Length of the path. ``eps=1e-3`` means that\n``alpha_min / alpha_max = 1e-3``." @@ -132488,7 +131915,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskLassoCV.__init__.n_alphas", "default_value": "100", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=100", "description": "Number of alphas along the regularization path." @@ -132504,7 +131931,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskLassoCV.__init__.alphas", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like, default=None", "description": "List of alphas where to compute the models.\nIf not provided, set automatically." @@ -132520,7 +131947,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskLassoCV.__init__.fit_intercept", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether to calculate the intercept for this model. If set\nto false, no intercept will be used in calculations\n(i.e. data is expected to be centered)." @@ -132536,7 +131963,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskLassoCV.__init__.normalize", "default_value": "'deprecated'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "This parameter is ignored when ``fit_intercept`` is set to False.\nIf True, the regressors X will be normalized before regression by\nsubtracting the mean and dividing by the l2-norm.\nIf you wish to standardize, please use\n:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``\non an estimator with ``normalize=False``.\n\n.. deprecated:: 1.0\n ``normalize`` was deprecated in version 1.0 and will be removed in\n 1.2." @@ -132552,7 +131979,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskLassoCV.__init__.max_iter", "default_value": "1000", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=1000", "description": "The maximum number of iterations." 
@@ -132568,7 +131995,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskLassoCV.__init__.tol", "default_value": "0.0001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-4", "description": "The tolerance for the optimization: if the updates are\nsmaller than ``tol``, the optimization code checks the\ndual gap for optimality and continues until it is smaller\nthan ``tol``." @@ -132584,7 +132011,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskLassoCV.__init__.copy_X", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "If ``True``, X will be copied; else, it may be overwritten." @@ -132600,7 +132027,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskLassoCV.__init__.cv", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, cross-validation generator or iterable, default=None", "description": "Determines the cross-validation splitting strategy.\nPossible inputs for cv are:\n\n- None, to use the default 5-fold cross-validation,\n- int, to specify the number of folds.\n- :term:`CV splitter`,\n- An iterable yielding (train, test) splits as arrays of indices.\n\nFor int/None inputs, :class:`KFold` is used.\n\nRefer :ref:`User Guide ` for the various\ncross-validation strategies that can be used here.\n\n.. versionchanged:: 0.22\n ``cv`` default value if None changed from 3-fold to 5-fold." @@ -132629,7 +132056,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskLassoCV.__init__.verbose", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool or int, default=False", "description": "Amount of verbosity." @@ -132654,7 +132081,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskLassoCV.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Number of CPUs to use during the cross validation. Note that this is\nused only if multiple values for l1_ratio are given.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details." @@ -132670,7 +132097,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskLassoCV.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance, default=None", "description": "The seed of the pseudo random number generator that selects a random\nfeature to update. Used when ``selection`` == 'random'.\nPass an int for reproducible output across multiple function calls.\nSee :term:`Glossary `." @@ -132695,7 +132122,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskLassoCV.__init__.selection", "default_value": "'cyclic'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'cyclic', 'random'}, default='cyclic'", "description": "If set to 'random', a random coefficient is updated every iteration\nrather than looping over features sequentially by default. This\n(setting to 'random') often leads to significantly faster convergence\nespecially when tol is higher than 1e-4." 
@@ -132707,7 +132134,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -132798,7 +132225,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskLassoCV.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -132811,7 +132238,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskLassoCV.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray of shape (n_samples, n_features)", "description": "Data." @@ -132827,7 +132254,7 @@ "qname": "sklearn.linear_model._coordinate_descent.MultiTaskLassoCV.fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray of shape (n_samples, n_targets)", "description": "Target. Will be cast to X's dtype if necessary." @@ -132839,7 +132266,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit MultiTaskLasso model with coordinate descent.\n\nFit is on grid of alphas and best alpha estimated by cross-validation.", "docstring": "Fit MultiTaskLasso model with coordinate descent.\n\n Fit is on grid of alphas and best alpha estimated by cross-validation.\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Data.\n y : ndarray of shape (n_samples, n_targets)\n Target. Will be cast to X's dtype if necessary.\n\n Returns\n -------\n self : object\n Returns an instance of fitted model.\n " }, @@ -133215,7 +132642,7 @@ }, "type": { "kind": "EnumType", - "values": ["C", "F"] + "values": ["F", "C"] } }, { @@ -133300,7 +132727,7 @@ }, "type": { "kind": "EnumType", - "values": ["C", "F"] + "values": ["F", "C"] } } ], @@ -133841,7 +133268,7 @@ "qname": "sklearn.linear_model._glm.glm.GammaRegressor.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -133854,7 +133281,7 @@ "qname": "sklearn.linear_model._glm.glm.GammaRegressor.__init__.alpha", "default_value": "1.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1", "description": "Constant that multiplies the penalty term and thus determines the\nregularization strength. ``alpha = 0`` is equivalent to unpenalized\nGLMs. In this case, the design matrix `X` must have full column rank\n(no collinearities).\nValues must be in the range `[0.0, inf)`." @@ -133870,7 +133297,7 @@ "qname": "sklearn.linear_model._glm.glm.GammaRegressor.__init__.fit_intercept", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Specifies if a constant (a.k.a. bias or intercept) should be\nadded to the linear predictor (X @ coef + intercept)." @@ -133886,7 +133313,7 @@ "qname": "sklearn.linear_model._glm.glm.GammaRegressor.__init__.max_iter", "default_value": "100", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=100", "description": "The maximal number of iterations for the solver.\nValues must be in the range `[1, inf)`." 
@@ -133902,7 +133329,7 @@ "qname": "sklearn.linear_model._glm.glm.GammaRegressor.__init__.tol", "default_value": "0.0001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-4", "description": "Stopping criterion. For the lbfgs solver,\nthe iteration will stop when ``max{|g_j|, j = 1, ..., d} <= tol``\nwhere ``g_j`` is the j-th component of the gradient (derivative) of\nthe objective function.\nValues must be in the range `(0.0, inf)`." @@ -133918,7 +133345,7 @@ "qname": "sklearn.linear_model._glm.glm.GammaRegressor.__init__.warm_start", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "If set to ``True``, reuse the solution of the previous call to ``fit``\nas initialization for ``coef_`` and ``intercept_`` ." @@ -133934,7 +133361,7 @@ "qname": "sklearn.linear_model._glm.glm.GammaRegressor.__init__.verbose", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=0", "description": "For the lbfgs solver set verbose to any positive number for verbosity.\nValues must be in the range `[0, inf)`." @@ -133946,7 +133373,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -133987,7 +133414,7 @@ "qname": "sklearn.linear_model._glm.glm.PoissonRegressor.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -134000,7 +133427,7 @@ "qname": "sklearn.linear_model._glm.glm.PoissonRegressor.__init__.alpha", "default_value": "1.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1", "description": "Constant that multiplies the penalty term and thus determines the\nregularization strength. ``alpha = 0`` is equivalent to unpenalized\nGLMs. In this case, the design matrix `X` must have full column rank\n(no collinearities).\nValues must be in the range `[0.0, inf)`." @@ -134016,7 +133443,7 @@ "qname": "sklearn.linear_model._glm.glm.PoissonRegressor.__init__.fit_intercept", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Specifies if a constant (a.k.a. bias or intercept) should be\nadded to the linear predictor (X @ coef + intercept)." @@ -134032,7 +133459,7 @@ "qname": "sklearn.linear_model._glm.glm.PoissonRegressor.__init__.max_iter", "default_value": "100", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=100", "description": "The maximal number of iterations for the solver.\nValues must be in the range `[1, inf)`." @@ -134048,7 +133475,7 @@ "qname": "sklearn.linear_model._glm.glm.PoissonRegressor.__init__.tol", "default_value": "0.0001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-4", "description": "Stopping criterion. For the lbfgs solver,\nthe iteration will stop when ``max{|g_j|, j = 1, ..., d} <= tol``\nwhere ``g_j`` is the j-th component of the gradient (derivative) of\nthe objective function.\nValues must be in the range `(0.0, inf)`." 
@@ -134064,7 +133491,7 @@ "qname": "sklearn.linear_model._glm.glm.PoissonRegressor.__init__.warm_start", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "If set to ``True``, reuse the solution of the previous call to ``fit``\nas initialization for ``coef_`` and ``intercept_`` ." @@ -134080,7 +133507,7 @@ "qname": "sklearn.linear_model._glm.glm.PoissonRegressor.__init__.verbose", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=0", "description": "For the lbfgs solver set verbose to any positive number for verbosity.\nValues must be in the range `[0, inf)`." @@ -134092,7 +133519,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -134133,7 +133560,7 @@ "qname": "sklearn.linear_model._glm.glm.TweedieRegressor.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -134146,7 +133573,7 @@ "qname": "sklearn.linear_model._glm.glm.TweedieRegressor.__init__.power", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0", "description": "The power determines the underlying target distribution according\nto the following table:\n\n+-------+------------------------+\n| Power | Distribution |\n+=======+========================+\n| 0 | Normal |\n+-------+------------------------+\n| 1 | Poisson |\n+-------+------------------------+\n| (1,2) | Compound Poisson Gamma |\n+-------+------------------------+\n| 2 | Gamma |\n+-------+------------------------+\n| 3 | Inverse Gaussian |\n+-------+------------------------+\n\nFor ``0 < power < 1``, no distribution exists." @@ -134162,7 +133589,7 @@ "qname": "sklearn.linear_model._glm.glm.TweedieRegressor.__init__.alpha", "default_value": "1.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1", "description": "Constant that multiplies the penalty term and thus determines the\nregularization strength. ``alpha = 0`` is equivalent to unpenalized\nGLMs. In this case, the design matrix `X` must have full column rank\n(no collinearities).\nValues must be in the range `[0.0, inf)`." @@ -134178,7 +133605,7 @@ "qname": "sklearn.linear_model._glm.glm.TweedieRegressor.__init__.fit_intercept", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Specifies if a constant (a.k.a. bias or intercept) should be\nadded to the linear predictor (X @ coef + intercept)." @@ -134194,14 +133621,14 @@ "qname": "sklearn.linear_model._glm.glm.TweedieRegressor.__init__.link", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'auto', 'identity', 'log'}, default='auto'", "description": "The link function of the GLM, i.e. mapping from linear predictor\n`X @ coeff + intercept` to prediction `y_pred`. Option 'auto' sets\nthe link depending on the chosen `power` parameter as follows:\n\n- 'identity' for ``power <= 0``, e.g. for the Normal distribution\n- 'log' for ``power > 0``, e.g. 
for Poisson, Gamma and Inverse Gaussian\n distributions" }, "type": { "kind": "EnumType", - "values": ["log", "auto", "identity"] + "values": ["auto", "identity", "log"] } }, { @@ -134210,7 +133637,7 @@ "qname": "sklearn.linear_model._glm.glm.TweedieRegressor.__init__.max_iter", "default_value": "100", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=100", "description": "The maximal number of iterations for the solver.\nValues must be in the range `[1, inf)`." @@ -134226,7 +133653,7 @@ "qname": "sklearn.linear_model._glm.glm.TweedieRegressor.__init__.tol", "default_value": "0.0001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-4", "description": "Stopping criterion. For the lbfgs solver,\nthe iteration will stop when ``max{|g_j|, j = 1, ..., d} <= tol``\nwhere ``g_j`` is the j-th component of the gradient (derivative) of\nthe objective function.\nValues must be in the range `(0.0, inf)`." @@ -134242,7 +133669,7 @@ "qname": "sklearn.linear_model._glm.glm.TweedieRegressor.__init__.warm_start", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "If set to ``True``, reuse the solution of the previous call to ``fit``\nas initialization for ``coef_`` and ``intercept_`` ." @@ -134258,7 +133685,7 @@ "qname": "sklearn.linear_model._glm.glm.TweedieRegressor.__init__.verbose", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=0", "description": "For the lbfgs solver set verbose to any positive number for verbosity.\nValues must be in the range `[0, inf)`." @@ -134270,7 +133697,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -134311,7 +133738,7 @@ "qname": "sklearn.linear_model._glm.glm._GeneralizedLinearRegressor.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -134324,7 +133751,7 @@ "qname": "sklearn.linear_model._glm.glm._GeneralizedLinearRegressor.__init__.alpha", "default_value": "1.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1", "description": "Constant that multiplies the penalty term and thus determines the\nregularization strength. ``alpha = 0`` is equivalent to unpenalized\nGLMs. In this case, the design matrix `X` must have full column rank\n(no collinearities).\nValues must be in the range `[0.0, inf)`." @@ -134340,7 +133767,7 @@ "qname": "sklearn.linear_model._glm.glm._GeneralizedLinearRegressor.__init__.fit_intercept", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Specifies if a constant (a.k.a. bias or intercept) should be\nadded to the linear predictor (X @ coef + intercept)." @@ -134356,7 +133783,7 @@ "qname": "sklearn.linear_model._glm.glm._GeneralizedLinearRegressor.__init__.solver", "default_value": "'lbfgs'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "'lbfgs', default='lbfgs'", "description": "Algorithm to use in the optimization problem:\n\n'lbfgs'\n Calls scipy's L-BFGS-B optimizer." 
@@ -134372,7 +133799,7 @@ "qname": "sklearn.linear_model._glm.glm._GeneralizedLinearRegressor.__init__.max_iter", "default_value": "100", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=100", "description": "The maximal number of iterations for the solver.\nValues must be in the range `[1, inf)`." @@ -134388,7 +133815,7 @@ "qname": "sklearn.linear_model._glm.glm._GeneralizedLinearRegressor.__init__.tol", "default_value": "0.0001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-4", "description": "Stopping criterion. For the lbfgs solver,\nthe iteration will stop when ``max{|g_j|, j = 1, ..., d} <= tol``\nwhere ``g_j`` is the j-th component of the gradient (derivative) of\nthe objective function.\nValues must be in the range `(0.0, inf)`." @@ -134404,7 +133831,7 @@ "qname": "sklearn.linear_model._glm.glm._GeneralizedLinearRegressor.__init__.warm_start", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "If set to ``True``, reuse the solution of the previous call to ``fit``\nas initialization for ``coef_`` and ``intercept_``." @@ -134420,7 +133847,7 @@ "qname": "sklearn.linear_model._glm.glm._GeneralizedLinearRegressor.__init__.verbose", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=0", "description": "For the lbfgs solver set verbose to any positive number for verbosity.\nValues must be in the range `[0, inf)`." @@ -134432,7 +133859,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -134551,7 +133978,7 @@ "qname": "sklearn.linear_model._glm.glm._GeneralizedLinearRegressor.family.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -134560,7 +133987,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Ensure backward compatibility for the time of deprecation.", "docstring": "Ensure backward compatibility for the time of deprecation." }, @@ -134576,7 +134003,7 @@ "qname": "sklearn.linear_model._glm.glm._GeneralizedLinearRegressor.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -134589,7 +134016,7 @@ "qname": "sklearn.linear_model._glm.glm._GeneralizedLinearRegressor.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Training data." @@ -134614,7 +134041,7 @@ "qname": "sklearn.linear_model._glm.glm._GeneralizedLinearRegressor.fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "Target values." @@ -134630,7 +134057,7 @@ "qname": "sklearn.linear_model._glm.glm._GeneralizedLinearRegressor.fit.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,), default=None", "description": "Sample weights." 
@@ -134642,7 +134069,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit a Generalized Linear Model.", "docstring": "Fit a Generalized Linear Model.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training data.\n\n y : array-like of shape (n_samples,)\n Target values.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights.\n\n Returns\n -------\n self : object\n Fitted model.\n " }, @@ -134658,7 +134085,7 @@ "qname": "sklearn.linear_model._glm.glm._GeneralizedLinearRegressor.predict.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -134671,7 +134098,7 @@ "qname": "sklearn.linear_model._glm.glm._GeneralizedLinearRegressor.predict.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Samples." @@ -134692,7 +134119,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict using GLM with feature matrix X.", "docstring": "Predict using GLM with feature matrix X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Samples.\n\n Returns\n -------\n y_pred : array of shape (n_samples,)\n Returns predicted values.\n " }, @@ -134708,7 +134135,7 @@ "qname": "sklearn.linear_model._glm.glm._GeneralizedLinearRegressor.score.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -134721,7 +134148,7 @@ "qname": "sklearn.linear_model._glm.glm._GeneralizedLinearRegressor.score.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Test samples." @@ -134746,7 +134173,7 @@ "qname": "sklearn.linear_model._glm.glm._GeneralizedLinearRegressor.score.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "True values of target." @@ -134762,7 +134189,7 @@ "qname": "sklearn.linear_model._glm.glm._GeneralizedLinearRegressor.score.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,), default=None", "description": "Sample weights." @@ -134774,7 +134201,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Compute D^2, the percentage of deviance explained.\n\nD^2 is a generalization of the coefficient of determination R^2.\nR^2 uses squared error and D^2 uses the deviance of this GLM, see the\n:ref:`User Guide `.\n\nD^2 is defined as\n:math:`D^2 = 1-\\frac{D(y_{true},y_{pred})}{D_{null}}`,\n:math:`D_{null}` is the null deviance, i.e. 
the deviance of a model\nwith intercept alone, which corresponds to :math:`y_{pred} = \\bar{y}`.\nThe mean :math:`\\bar{y}` is averaged by sample_weight.\nBest possible score is 1.0 and it can be negative (because the model\ncan be arbitrarily worse).", "docstring": "Compute D^2, the percentage of deviance explained.\n\n D^2 is a generalization of the coefficient of determination R^2.\n R^2 uses squared error and D^2 uses the deviance of this GLM, see the\n :ref:`User Guide `.\n\n D^2 is defined as\n :math:`D^2 = 1-\\frac{D(y_{true},y_{pred})}{D_{null}}`,\n :math:`D_{null}` is the null deviance, i.e. the deviance of a model\n with intercept alone, which corresponds to :math:`y_{pred} = \\bar{y}`.\n The mean :math:`\\bar{y}` is averaged by sample_weight.\n Best possible score is 1.0 and it can be negative (because the model\n can be arbitrarily worse).\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Test samples.\n\n y : array-like of shape (n_samples,)\n True values of target.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights.\n\n Returns\n -------\n score : float\n D^2 of self.predict(X) w.r.t. y.\n " }, @@ -134790,7 +134217,7 @@ "qname": "sklearn.linear_model._huber.HuberRegressor.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -134803,7 +134230,7 @@ "qname": "sklearn.linear_model._huber.HuberRegressor.__init__.epsilon", "default_value": "1.35", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, greater than 1.0, default=1.35", "description": "The parameter epsilon controls the number of samples that should be\nclassified as outliers. The smaller the epsilon, the more robust it is\nto outliers." @@ -134828,7 +134255,7 @@ "qname": "sklearn.linear_model._huber.HuberRegressor.__init__.max_iter", "default_value": "100", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=100", "description": "Maximum number of iterations that\n``scipy.optimize.minimize(method=\"L-BFGS-B\")`` should run for." @@ -134844,7 +134271,7 @@ "qname": "sklearn.linear_model._huber.HuberRegressor.__init__.alpha", "default_value": "0.0001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.0001", "description": "Regularization parameter." @@ -134860,7 +134287,7 @@ "qname": "sklearn.linear_model._huber.HuberRegressor.__init__.warm_start", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "This is useful if the stored attributes of a previously used model\nhas to be reused. If set to False, then the coefficients will\nbe rewritten for every call to fit.\nSee :term:`the Glossary `." @@ -134876,7 +134303,7 @@ "qname": "sklearn.linear_model._huber.HuberRegressor.__init__.fit_intercept", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether or not to fit the intercept. This can be set to False\nif the data is already centered around the origin." 
@@ -134892,7 +134319,7 @@ "qname": "sklearn.linear_model._huber.HuberRegressor.__init__.tol", "default_value": "1e-05", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-05", "description": "The iteration will stop when\n``max{|proj g_i | i = 1, ..., n}`` <= ``tol``\nwhere pg_i is the i-th component of the projected gradient." @@ -134904,7 +134331,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -134920,7 +134347,7 @@ "qname": "sklearn.linear_model._huber.HuberRegressor.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -134933,7 +134360,7 @@ "qname": "sklearn.linear_model._huber.HuberRegressor.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like, shape (n_samples, n_features)", "description": "Training vector, where `n_samples` is the number of samples and\n`n_features` is the number of features." @@ -134958,7 +134385,7 @@ "qname": "sklearn.linear_model._huber.HuberRegressor.fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like, shape (n_samples,)", "description": "Target vector relative to X." @@ -134983,7 +134410,7 @@ "qname": "sklearn.linear_model._huber.HuberRegressor.fit.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like, shape (n_samples,)", "description": "Weight given to each sample." @@ -135004,7 +134431,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the model according to the given training data.", "docstring": "Fit the model according to the given training data.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n Training vector, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n y : array-like, shape (n_samples,)\n Target vector relative to X.\n\n sample_weight : array-like, shape (n_samples,)\n Weight given to each sample.\n\n Returns\n -------\n self : object\n Fitted `HuberRegressor` estimator.\n " }, @@ -135137,7 +134564,7 @@ "qname": "sklearn.linear_model._least_angle.Lars.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -135150,7 +134577,7 @@ "qname": "sklearn.linear_model._least_angle.Lars.__init__.fit_intercept", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether to calculate the intercept for this model. If set\nto false, no intercept will be used in calculations\n(i.e. data is expected to be centered)." @@ -135166,7 +134593,7 @@ "qname": "sklearn.linear_model._least_angle.Lars.__init__.verbose", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool or int, default=False", "description": "Sets the verbosity amount." 
@@ -135191,7 +134618,7 @@ "qname": "sklearn.linear_model._least_angle.Lars.__init__.normalize", "default_value": "'deprecated'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "This parameter is ignored when ``fit_intercept`` is set to False.\nIf True, the regressors X will be normalized before regression by\nsubtracting the mean and dividing by the l2-norm.\nIf you wish to standardize, please use\n:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``\non an estimator with ``normalize=False``.\n\n.. deprecated:: 1.0\n ``normalize`` was deprecated in version 1.0. It will default\n to False in 1.2 and be removed in 1.4." @@ -135207,7 +134634,7 @@ "qname": "sklearn.linear_model._least_angle.Lars.__init__.precompute", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, 'auto' or array-like , default='auto'", "description": "Whether to use a precomputed Gram matrix to speed up\ncalculations. If set to ``'auto'`` let us decide. The Gram\nmatrix can also be passed as argument." @@ -135236,7 +134663,7 @@ "qname": "sklearn.linear_model._least_angle.Lars.__init__.n_nonzero_coefs", "default_value": "500", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=500", "description": "Target number of non-zero coefficients. Use ``np.inf`` for no limit." @@ -135252,7 +134679,7 @@ "qname": "sklearn.linear_model._least_angle.Lars.__init__.eps", "default_value": "np.finfo(float).eps", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=np.finfo(float).eps", "description": "The machine-precision regularization in the computation of the\nCholesky diagonal factors. Increase this for very ill-conditioned\nsystems. Unlike the ``tol`` parameter in some iterative\noptimization-based algorithms, this parameter does not control\nthe tolerance of the optimization." @@ -135268,7 +134695,7 @@ "qname": "sklearn.linear_model._least_angle.Lars.__init__.copy_X", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "If ``True``, X will be copied; else, it may be overwritten." @@ -135284,7 +134711,7 @@ "qname": "sklearn.linear_model._least_angle.Lars.__init__.fit_path", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "If True the full path is stored in the ``coef_path_`` attribute.\nIf you compute the solution for a large problem or many targets,\nsetting ``fit_path`` to ``False`` will lead to a speedup, especially\nwith a small alpha." @@ -135300,7 +134727,7 @@ "qname": "sklearn.linear_model._least_angle.Lars.__init__.jitter", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=None", "description": "Upper bound on a uniform noise parameter to be added to the\n`y` values, to satisfy the model's assumption of\none-at-a-time computations. Might help with stability.\n\n.. 
versionadded:: 0.23" @@ -135316,7 +134743,7 @@ "qname": "sklearn.linear_model._least_angle.Lars.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Determines random number generation for jittering. Pass an int\nfor reproducible output across multiple function calls.\nSee :term:`Glossary `. Ignored if `jitter` is None.\n\n.. versionadded:: 0.23" @@ -135341,7 +134768,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -135524,7 +134951,7 @@ "qname": "sklearn.linear_model._least_angle.Lars.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -135537,7 +134964,7 @@ "qname": "sklearn.linear_model._least_angle.Lars.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Training data." @@ -135553,7 +134980,7 @@ "qname": "sklearn.linear_model._least_angle.Lars.fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,) or (n_samples, n_targets)", "description": "Target values." @@ -135569,7 +134996,7 @@ "qname": "sklearn.linear_model._least_angle.Lars.fit.Xy", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,) or (n_samples, n_targets), default=None", "description": "Xy = np.dot(X.T, y) that can be precomputed. It is useful\nonly when the Gram matrix is precomputed." @@ -135581,7 +135008,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the model using X, y as training data.", "docstring": "Fit the model using X, y as training data.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data.\n\n y : array-like of shape (n_samples,) or (n_samples, n_targets)\n Target values.\n\n Xy : array-like of shape (n_samples,) or (n_samples, n_targets), default=None\n Xy = np.dot(X.T, y) that can be precomputed. It is useful\n only when the Gram matrix is precomputed.\n\n Returns\n -------\n self : object\n Returns an instance of self.\n " }, @@ -135597,7 +135024,7 @@ "qname": "sklearn.linear_model._least_angle.LarsCV.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -135610,7 +135037,7 @@ "qname": "sklearn.linear_model._least_angle.LarsCV.__init__.fit_intercept", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether to calculate the intercept for this model. If set\nto false, no intercept will be used in calculations\n(i.e. data is expected to be centered)." @@ -135626,7 +135053,7 @@ "qname": "sklearn.linear_model._least_angle.LarsCV.__init__.verbose", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool or int, default=False", "description": "Sets the verbosity amount." 
@@ -135651,7 +135078,7 @@ "qname": "sklearn.linear_model._least_angle.LarsCV.__init__.max_iter", "default_value": "500", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=500", "description": "Maximum number of iterations to perform." @@ -135667,7 +135094,7 @@ "qname": "sklearn.linear_model._least_angle.LarsCV.__init__.normalize", "default_value": "'deprecated'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "This parameter is ignored when ``fit_intercept`` is set to False.\nIf True, the regressors X will be normalized before regression by\nsubtracting the mean and dividing by the l2-norm.\nIf you wish to standardize, please use\n:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``\non an estimator with ``normalize=False``.\n\n.. deprecated:: 1.0\n ``normalize`` was deprecated in version 1.0. It will default\n to False in 1.2 and be removed in 1.4." @@ -135683,7 +135110,7 @@ "qname": "sklearn.linear_model._least_angle.LarsCV.__init__.precompute", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, 'auto' or array-like , default='auto'", "description": "Whether to use a precomputed Gram matrix to speed up\ncalculations. If set to ``'auto'`` let us decide. The Gram matrix\ncannot be passed as argument since we will use only subsets of X." @@ -135712,7 +135139,7 @@ "qname": "sklearn.linear_model._least_angle.LarsCV.__init__.cv", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, cross-validation generator or an iterable, default=None", "description": "Determines the cross-validation splitting strategy.\nPossible inputs for cv are:\n\n- None, to use the default 5-fold cross-validation,\n- integer, to specify the number of folds.\n- :term:`CV splitter`,\n- An iterable yielding (train, test) splits as arrays of indices.\n\nFor integer/None inputs, :class:`KFold` is used.\n\nRefer :ref:`User Guide ` for the various\ncross-validation strategies that can be used here.\n\n.. versionchanged:: 0.22\n ``cv`` default value if None changed from 3-fold to 5-fold." @@ -135741,7 +135168,7 @@ "qname": "sklearn.linear_model._least_angle.LarsCV.__init__.max_n_alphas", "default_value": "1000", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=1000", "description": "The maximum number of points on the path used to compute the\nresiduals in the cross-validation." @@ -135757,7 +135184,7 @@ "qname": "sklearn.linear_model._least_angle.LarsCV.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or None, default=None", "description": "Number of CPUs to use during the cross validation.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details." @@ -135782,7 +135209,7 @@ "qname": "sklearn.linear_model._least_angle.LarsCV.__init__.eps", "default_value": "np.finfo(float).eps", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=np.finfo(float).eps", "description": "The machine-precision regularization in the computation of the\nCholesky diagonal factors. Increase this for very ill-conditioned\nsystems. 
Unlike the ``tol`` parameter in some iterative\noptimization-based algorithms, this parameter does not control\nthe tolerance of the optimization." @@ -135798,7 +135225,7 @@ "qname": "sklearn.linear_model._least_angle.LarsCV.__init__.copy_X", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "If ``True``, X will be copied; else, it may be overwritten." @@ -135810,7 +135237,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -135851,7 +135278,7 @@ "qname": "sklearn.linear_model._least_angle.LarsCV.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -135864,7 +135291,7 @@ "qname": "sklearn.linear_model._least_angle.LarsCV.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Training data." @@ -135880,7 +135307,7 @@ "qname": "sklearn.linear_model._least_angle.LarsCV.fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "Target values." @@ -135892,7 +135319,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the model using X, y as training data.", "docstring": "Fit the model using X, y as training data.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data.\n\n y : array-like of shape (n_samples,)\n Target values.\n\n Returns\n -------\n self : object\n Returns an instance of self.\n " }, @@ -135908,7 +135335,7 @@ "qname": "sklearn.linear_model._least_angle.LassoLars.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -135921,7 +135348,7 @@ "qname": "sklearn.linear_model._least_angle.LassoLars.__init__.alpha", "default_value": "1.0", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1.0", "description": "Constant that multiplies the penalty term. Defaults to 1.0.\n``alpha = 0`` is equivalent to an ordinary least square, solved\nby :class:`LinearRegression`. For numerical reasons, using\n``alpha = 0`` with the LassoLars object is not advised and you\nshould prefer the LinearRegression object." @@ -135937,7 +135364,7 @@ "qname": "sklearn.linear_model._least_angle.LassoLars.__init__.fit_intercept", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether to calculate the intercept for this model. If set\nto false, no intercept will be used in calculations\n(i.e. data is expected to be centered)." @@ -135953,7 +135380,7 @@ "qname": "sklearn.linear_model._least_angle.LassoLars.__init__.verbose", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool or int, default=False", "description": "Sets the verbosity amount." 
@@ -135978,7 +135405,7 @@ "qname": "sklearn.linear_model._least_angle.LassoLars.__init__.normalize", "default_value": "'deprecated'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "This parameter is ignored when ``fit_intercept`` is set to False.\nIf True, the regressors X will be normalized before regression by\nsubtracting the mean and dividing by the l2-norm.\nIf you wish to standardize, please use\n:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``\non an estimator with ``normalize=False``.\n\n.. deprecated:: 1.0\n ``normalize`` was deprecated in version 1.0. It will default\n to False in 1.2 and be removed in 1.4." @@ -135994,7 +135421,7 @@ "qname": "sklearn.linear_model._least_angle.LassoLars.__init__.precompute", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, 'auto' or array-like, default='auto'", "description": "Whether to use a precomputed Gram matrix to speed up\ncalculations. If set to ``'auto'`` let us decide. The Gram\nmatrix can also be passed as argument." @@ -136023,7 +135450,7 @@ "qname": "sklearn.linear_model._least_angle.LassoLars.__init__.max_iter", "default_value": "500", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=500", "description": "Maximum number of iterations to perform." @@ -136039,7 +135466,7 @@ "qname": "sklearn.linear_model._least_angle.LassoLars.__init__.eps", "default_value": "np.finfo(float).eps", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=np.finfo(float).eps", "description": "The machine-precision regularization in the computation of the\nCholesky diagonal factors. Increase this for very ill-conditioned\nsystems. Unlike the ``tol`` parameter in some iterative\noptimization-based algorithms, this parameter does not control\nthe tolerance of the optimization." @@ -136055,7 +135482,7 @@ "qname": "sklearn.linear_model._least_angle.LassoLars.__init__.copy_X", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "If True, X will be copied; else, it may be overwritten." @@ -136071,7 +135498,7 @@ "qname": "sklearn.linear_model._least_angle.LassoLars.__init__.fit_path", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "If ``True`` the full path is stored in the ``coef_path_`` attribute.\nIf you compute the solution for a large problem or many targets,\nsetting ``fit_path`` to ``False`` will lead to a speedup, especially\nwith a small alpha." @@ -136087,7 +135514,7 @@ "qname": "sklearn.linear_model._least_angle.LassoLars.__init__.positive", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Restrict coefficients to be >= 0. 
Be aware that you might want to\nremove fit_intercept which is set True by default.\nUnder the positive restriction the model coefficients will not converge\nto the ordinary-least-squares solution for small values of alpha.\nOnly coefficients up to the smallest alpha value (``alphas_[alphas_ >\n0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso\nalgorithm are typically in congruence with the solution of the\ncoordinate descent Lasso estimator." @@ -136103,7 +135530,7 @@ "qname": "sklearn.linear_model._least_angle.LassoLars.__init__.jitter", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=None", "description": "Upper bound on a uniform noise parameter to be added to the\n`y` values, to satisfy the model's assumption of\none-at-a-time computations. Might help with stability.\n\n.. versionadded:: 0.23" @@ -136119,7 +135546,7 @@ "qname": "sklearn.linear_model._least_angle.LassoLars.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Determines random number generation for jittering. Pass an int\nfor reproducible output across multiple function calls.\nSee :term:`Glossary `. Ignored if `jitter` is None.\n\n.. versionadded:: 0.23" @@ -136144,7 +135571,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -136160,7 +135587,7 @@ "qname": "sklearn.linear_model._least_angle.LassoLarsCV.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -136173,7 +135600,7 @@ "qname": "sklearn.linear_model._least_angle.LassoLarsCV.__init__.fit_intercept", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether to calculate the intercept for this model. If set\nto false, no intercept will be used in calculations\n(i.e. data is expected to be centered)." @@ -136189,7 +135616,7 @@ "qname": "sklearn.linear_model._least_angle.LassoLarsCV.__init__.verbose", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool or int, default=False", "description": "Sets the verbosity amount." @@ -136214,7 +135641,7 @@ "qname": "sklearn.linear_model._least_angle.LassoLarsCV.__init__.max_iter", "default_value": "500", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=500", "description": "Maximum number of iterations to perform." @@ -136230,7 +135657,7 @@ "qname": "sklearn.linear_model._least_angle.LassoLarsCV.__init__.normalize", "default_value": "'deprecated'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "This parameter is ignored when ``fit_intercept`` is set to False.\nIf True, the regressors X will be normalized before regression by\nsubtracting the mean and dividing by the l2-norm.\nIf you wish to standardize, please use\n:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``\non an estimator with ``normalize=False``.\n\n.. deprecated:: 1.0\n ``normalize`` was deprecated in version 1.0. It will default\n to False in 1.2 and be removed in 1.4." 
@@ -136246,7 +135673,7 @@ "qname": "sklearn.linear_model._least_angle.LassoLarsCV.__init__.precompute", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool or 'auto' , default='auto'", "description": "Whether to use a precomputed Gram matrix to speed up\ncalculations. If set to ``'auto'`` let us decide. The Gram matrix\ncannot be passed as argument since we will use only subsets of X." @@ -136271,7 +135698,7 @@ "qname": "sklearn.linear_model._least_angle.LassoLarsCV.__init__.cv", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, cross-validation generator or an iterable, default=None", "description": "Determines the cross-validation splitting strategy.\nPossible inputs for cv are:\n\n- None, to use the default 5-fold cross-validation,\n- integer, to specify the number of folds.\n- :term:`CV splitter`,\n- An iterable yielding (train, test) splits as arrays of indices.\n\nFor integer/None inputs, :class:`KFold` is used.\n\nRefer :ref:`User Guide ` for the various\ncross-validation strategies that can be used here.\n\n.. versionchanged:: 0.22\n ``cv`` default value if None changed from 3-fold to 5-fold." @@ -136300,7 +135727,7 @@ "qname": "sklearn.linear_model._least_angle.LassoLarsCV.__init__.max_n_alphas", "default_value": "1000", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=1000", "description": "The maximum number of points on the path used to compute the\nresiduals in the cross-validation." @@ -136316,7 +135743,7 @@ "qname": "sklearn.linear_model._least_angle.LassoLarsCV.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or None, default=None", "description": "Number of CPUs to use during the cross validation.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details." @@ -136341,7 +135768,7 @@ "qname": "sklearn.linear_model._least_angle.LassoLarsCV.__init__.eps", "default_value": "np.finfo(float).eps", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=np.finfo(float).eps", "description": "The machine-precision regularization in the computation of the\nCholesky diagonal factors. Increase this for very ill-conditioned\nsystems. Unlike the ``tol`` parameter in some iterative\noptimization-based algorithms, this parameter does not control\nthe tolerance of the optimization." @@ -136357,7 +135784,7 @@ "qname": "sklearn.linear_model._least_angle.LassoLarsCV.__init__.copy_X", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "If True, X will be copied; else, it may be overwritten." @@ -136373,7 +135800,7 @@ "qname": "sklearn.linear_model._least_angle.LassoLarsCV.__init__.positive", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Restrict coefficients to be >= 0. 
Be aware that you might want to\nremove fit_intercept which is set True by default.\nUnder the positive restriction the model coefficients do not converge\nto the ordinary-least-squares solution for small values of alpha.\nOnly coefficients up to the smallest alpha value (``alphas_[alphas_ >\n0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso\nalgorithm are typically in congruence with the solution of the\ncoordinate descent Lasso estimator.\nAs a consequence using LassoLarsCV only makes sense for problems where\na sparse solution is expected and/or reached." @@ -136385,7 +135812,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -136401,7 +135828,7 @@ "qname": "sklearn.linear_model._least_angle.LassoLarsIC.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -136414,7 +135841,7 @@ "qname": "sklearn.linear_model._least_angle.LassoLarsIC.__init__.criterion", "default_value": "'aic'", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{'aic', 'bic'}, default='aic'", "description": "The type of criterion to use." @@ -136430,7 +135857,7 @@ "qname": "sklearn.linear_model._least_angle.LassoLarsIC.__init__.fit_intercept", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether to calculate the intercept for this model. If set\nto false, no intercept will be used in calculations\n(i.e. data is expected to be centered)." @@ -136446,7 +135873,7 @@ "qname": "sklearn.linear_model._least_angle.LassoLarsIC.__init__.verbose", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool or int, default=False", "description": "Sets the verbosity amount." @@ -136471,7 +135898,7 @@ "qname": "sklearn.linear_model._least_angle.LassoLarsIC.__init__.normalize", "default_value": "'deprecated'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "This parameter is ignored when ``fit_intercept`` is set to False.\nIf True, the regressors X will be normalized before regression by\nsubtracting the mean and dividing by the l2-norm.\nIf you wish to standardize, please use\n:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``\non an estimator with ``normalize=False``.\n\n.. deprecated:: 1.0\n ``normalize`` was deprecated in version 1.0. It will default\n to False in 1.2 and be removed in 1.4." @@ -136487,7 +135914,7 @@ "qname": "sklearn.linear_model._least_angle.LassoLarsIC.__init__.precompute", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, 'auto' or array-like, default='auto'", "description": "Whether to use a precomputed Gram matrix to speed up\ncalculations. If set to ``'auto'`` let us decide. The Gram\nmatrix can also be passed as argument." @@ -136516,7 +135943,7 @@ "qname": "sklearn.linear_model._least_angle.LassoLarsIC.__init__.max_iter", "default_value": "500", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=500", "description": "Maximum number of iterations to perform. Can be used for\nearly stopping." 
@@ -136532,7 +135959,7 @@ "qname": "sklearn.linear_model._least_angle.LassoLarsIC.__init__.eps", "default_value": "np.finfo(float).eps", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=np.finfo(float).eps", "description": "The machine-precision regularization in the computation of the\nCholesky diagonal factors. Increase this for very ill-conditioned\nsystems. Unlike the ``tol`` parameter in some iterative\noptimization-based algorithms, this parameter does not control\nthe tolerance of the optimization." @@ -136548,7 +135975,7 @@ "qname": "sklearn.linear_model._least_angle.LassoLarsIC.__init__.copy_X", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "If True, X will be copied; else, it may be overwritten." @@ -136564,7 +135991,7 @@ "qname": "sklearn.linear_model._least_angle.LassoLarsIC.__init__.positive", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Restrict coefficients to be >= 0. Be aware that you might want to\nremove fit_intercept which is set True by default.\nUnder the positive restriction the model coefficients do not converge\nto the ordinary-least-squares solution for small values of alpha.\nOnly coefficients up to the smallest alpha value (``alphas_[alphas_ >\n0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso\nalgorithm are typically in congruence with the solution of the\ncoordinate descent Lasso estimator.\nAs a consequence using LassoLarsIC only makes sense for problems where\na sparse solution is expected and/or reached." @@ -136580,7 +136007,7 @@ "qname": "sklearn.linear_model._least_angle.LassoLarsIC.__init__.noise_variance", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=None", "description": "The estimated noise variance of the data. If `None`, an unbiased\nestimate is computed by an OLS model. However, it is only possible\nin the case where `n_samples > n_features + fit_intercept`.\n\n.. versionadded:: 1.1" @@ -136592,7 +136019,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -136706,7 +136133,7 @@ "qname": "sklearn.linear_model._least_angle.LassoLarsIC.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -136719,7 +136146,7 @@ "qname": "sklearn.linear_model._least_angle.LassoLarsIC.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Training data." @@ -136735,7 +136162,7 @@ "qname": "sklearn.linear_model._least_angle.LassoLarsIC.fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "Target values. Will be cast to X's dtype if necessary." 
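With the `LassoLarsIC` constructor annotations above documenting `criterion` and `noise_variance`, here is a hedged usage sketch of model selection by information criterion; the data and the choice of `criterion='bic'` are illustrative, and `noise_variance` assumes a scikit-learn release (1.1 or later, per the docstring above) that accepts it.

    # Illustrative sketch only: LassoLarsIC selecting alpha by BIC.
    from sklearn.datasets import make_regression
    from sklearn.linear_model import LassoLarsIC

    # n_samples > n_features + fit_intercept, so the OLS-based noise estimate
    # described in the noise_variance docstring above is possible.
    X, y = make_regression(n_samples=100, n_features=20, noise=1.0, random_state=0)
    ic = LassoLarsIC(criterion="bic", noise_variance=None)
    ic.fit(X, y)
    print(ic.alpha_)  # alpha chosen by the information criterion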
@@ -136751,7 +136178,7 @@ "qname": "sklearn.linear_model._least_angle.LassoLarsIC.fit.copy_X", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=None", "description": "If provided, this parameter will override the choice\nof copy_X made at instance creation.\nIf ``True``, X will be copied; else, it may be overwritten." @@ -136763,7 +136190,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the model using X, y as training data.", "docstring": "Fit the model using X, y as training data.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data.\n\n y : array-like of shape (n_samples,)\n Target values. Will be cast to X's dtype if necessary.\n\n copy_X : bool, default=None\n If provided, this parameter will override the choice\n of copy_X made at instance creation.\n If ``True``, X will be copied; else, it may be overwritten.\n\n Returns\n -------\n self : object\n Returns an instance of self.\n " }, @@ -138571,7 +137998,7 @@ "qname": "sklearn.linear_model._logistic.LogisticRegression.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -138584,14 +138011,14 @@ "qname": "sklearn.linear_model._logistic.LogisticRegression.__init__.penalty", "default_value": "'l2'", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{'l1', 'l2', 'elasticnet', 'none'}, default='l2'", "description": "Specify the norm of the penalty:\n\n- `'none'`: no penalty is added;\n- `'l2'`: add a L2 penalty term and it is the default choice;\n- `'l1'`: add a L1 penalty term;\n- `'elasticnet'`: both L1 and L2 penalty terms are added.\n\n.. warning::\n Some penalties may not work with some solvers. See the parameter\n `solver` below, to know the compatibility between the penalty and\n solver.\n\n.. versionadded:: 0.19\n l1 penalty with SAGA solver (allowing 'multinomial' + L1)" }, "type": { "kind": "EnumType", - "values": ["l2", "none", "elasticnet", "l1"] + "values": ["l2", "none", "l1", "elasticnet"] } }, { @@ -138600,7 +138027,7 @@ "qname": "sklearn.linear_model._logistic.LogisticRegression.__init__.dual", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Dual or primal formulation. Dual formulation is only implemented for\nl2 penalty with liblinear solver. Prefer dual=False when\nn_samples > n_features." @@ -138616,7 +138043,7 @@ "qname": "sklearn.linear_model._logistic.LogisticRegression.__init__.tol", "default_value": "0.0001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-4", "description": "Tolerance for stopping criteria." @@ -138632,7 +138059,7 @@ "qname": "sklearn.linear_model._logistic.LogisticRegression.__init__.C", "default_value": "1.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1.0", "description": "Inverse of regularization strength; must be a positive float.\nLike in support vector machines, smaller values specify stronger\nregularization." 
@@ -138648,7 +138075,7 @@ "qname": "sklearn.linear_model._logistic.LogisticRegression.__init__.fit_intercept", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Specifies if a constant (a.k.a. bias or intercept) should be\nadded to the decision function." @@ -138664,7 +138091,7 @@ "qname": "sklearn.linear_model._logistic.LogisticRegression.__init__.intercept_scaling", "default_value": "1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1", "description": "Useful only when the solver 'liblinear' is used\nand self.fit_intercept is set to True. In this case, x becomes\n[x, self.intercept_scaling],\ni.e. a \"synthetic\" feature with constant value equal to\nintercept_scaling is appended to the instance vector.\nThe intercept becomes ``intercept_scaling * synthetic_feature_weight``.\n\nNote! the synthetic feature weight is subject to l1/l2 regularization\nas all other features.\nTo lessen the effect of regularization on synthetic feature weight\n(and therefore on the intercept) intercept_scaling has to be increased." @@ -138680,7 +138107,7 @@ "qname": "sklearn.linear_model._logistic.LogisticRegression.__init__.class_weight", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "dict or 'balanced', default=None", "description": "Weights associated with classes in the form ``{class_label: weight}``.\nIf not given, all classes are supposed to have weight one.\n\nThe \"balanced\" mode uses the values of y to automatically adjust\nweights inversely proportional to class frequencies in the input data\nas ``n_samples / (n_classes * np.bincount(y))``.\n\nNote that these weights will be multiplied with sample_weight (passed\nthrough the fit method) if sample_weight is specified.\n\n.. versionadded:: 0.17\n *class_weight='balanced'*" @@ -138705,7 +138132,7 @@ "qname": "sklearn.linear_model._logistic.LogisticRegression.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance, default=None", "description": "Used when ``solver`` == 'sag', 'saga' or 'liblinear' to shuffle the\ndata. See :term:`Glossary ` for details." @@ -138730,14 +138157,14 @@ "qname": "sklearn.linear_model._logistic.LogisticRegression.__init__.solver", "default_value": "'lbfgs'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'}, default='lbfgs'", "description": "Algorithm to use in the optimization problem. Default is 'lbfgs'.\nTo choose a solver, you might want to consider the following aspects:\n\n - For small datasets, 'liblinear' is a good choice, whereas 'sag'\n and 'saga' are faster for large ones;\n - For multiclass problems, only 'newton-cg', 'sag', 'saga' and\n 'lbfgs' handle multinomial loss;\n - 'liblinear' is limited to one-versus-rest schemes.\n\n.. warning::\n The choice of the algorithm depends on the penalty chosen:\n Supported penalties by solver:\n\n - 'newton-cg' - ['l2', 'none']\n - 'lbfgs' - ['l2', 'none']\n - 'liblinear' - ['l1', 'l2']\n - 'sag' - ['l2', 'none']\n - 'saga' - ['elasticnet', 'l1', 'l2', 'none']\n\n.. note::\n 'sag' and 'saga' fast convergence is only guaranteed on\n features with approximately the same scale. 
You can\n preprocess the data with a scaler from :mod:`sklearn.preprocessing`.\n\n.. seealso::\n Refer to the User Guide for more information regarding\n :class:`LogisticRegression` and more specifically the\n `Table `_\n summarazing solver/penalty supports.\n\n.. versionadded:: 0.17\n Stochastic Average Gradient descent solver.\n.. versionadded:: 0.19\n SAGA solver.\n.. versionchanged:: 0.22\n The default solver changed from 'liblinear' to 'lbfgs' in 0.22." }, "type": { "kind": "EnumType", - "values": ["lbfgs", "newton-cg", "saga", "sag", "liblinear"] + "values": ["saga", "newton-cg", "sag", "liblinear", "lbfgs"] } }, { @@ -138746,7 +138173,7 @@ "qname": "sklearn.linear_model._logistic.LogisticRegression.__init__.max_iter", "default_value": "100", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=100", "description": "Maximum number of iterations taken for the solvers to converge." @@ -138762,14 +138189,14 @@ "qname": "sklearn.linear_model._logistic.LogisticRegression.__init__.multi_class", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'auto', 'ovr', 'multinomial'}, default='auto'", "description": "If the option chosen is 'ovr', then a binary problem is fit for each\nlabel. For 'multinomial' the loss minimised is the multinomial loss fit\nacross the entire probability distribution, *even when the data is\nbinary*. 'multinomial' is unavailable when solver='liblinear'.\n'auto' selects 'ovr' if the data is binary, or if solver='liblinear',\nand otherwise selects 'multinomial'.\n\n.. versionadded:: 0.18\n Stochastic Average Gradient descent solver for 'multinomial' case.\n.. versionchanged:: 0.22\n Default changed from 'ovr' to 'auto' in 0.22." }, "type": { "kind": "EnumType", - "values": ["multinomial", "ovr", "auto"] + "values": ["auto", "ovr", "multinomial"] } }, { @@ -138778,7 +138205,7 @@ "qname": "sklearn.linear_model._logistic.LogisticRegression.__init__.verbose", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=0", "description": "For the liblinear and lbfgs solvers set verbose to any positive\nnumber for verbosity." @@ -138794,7 +138221,7 @@ "qname": "sklearn.linear_model._logistic.LogisticRegression.__init__.warm_start", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "When set to True, reuse the solution of the previous call to fit as\ninitialization, otherwise, just erase the previous solution.\nUseless for liblinear solver. See :term:`the Glossary `.\n\n.. versionadded:: 0.17\n *warm_start* to support *lbfgs*, *newton-cg*, *sag*, *saga* solvers." @@ -138810,7 +138237,7 @@ "qname": "sklearn.linear_model._logistic.LogisticRegression.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Number of CPU cores used when parallelizing over classes if\nmulti_class='ovr'\". This parameter is ignored when the ``solver`` is\nset to 'liblinear' regardless of whether 'multi_class' is specified or\nnot. ``None`` means 1 unless in a :obj:`joblib.parallel_backend`\ncontext. ``-1`` means using all processors.\nSee :term:`Glossary ` for more details." 
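The `solver` docstring reproduced above lists which penalties each solver supports. The sketch below is a hedged illustration of that compatibility table only; the toy data, `C`, and `max_iter` values are arbitrary, and `make_classification` is a general scikit-learn helper rather than something defined by these annotations.

    # Illustrative sketch only: solver/penalty pairs taken from the docstring above.
    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegression

    X, y = make_classification(n_samples=200, n_features=20, random_state=0)

    # 'lbfgs' (the default solver) supports 'l2' and 'none'.
    LogisticRegression(penalty="l2", solver="lbfgs", C=1.0, max_iter=1000).fit(X, y)

    # 'liblinear' supports 'l1' and 'l2', one-versus-rest only.
    LogisticRegression(penalty="l1", solver="liblinear", C=1.0).fit(X, y)

    # 'saga' is the only solver that also accepts 'elasticnet' (requires l1_ratio).
    LogisticRegression(penalty="elasticnet", solver="saga", l1_ratio=0.5, max_iter=2000).fit(X, y)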
@@ -138826,7 +138253,7 @@ "qname": "sklearn.linear_model._logistic.LogisticRegression.__init__.l1_ratio", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=None", "description": "The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only\nused if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent\nto using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent\nto using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a\ncombination of L1 and L2." @@ -138838,7 +138265,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -138854,7 +138281,7 @@ "qname": "sklearn.linear_model._logistic.LogisticRegression.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -138867,7 +138294,7 @@ "qname": "sklearn.linear_model._logistic.LogisticRegression.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Training vector, where `n_samples` is the number of samples and\n`n_features` is the number of features." @@ -138892,7 +138319,7 @@ "qname": "sklearn.linear_model._logistic.LogisticRegression.fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "Target vector relative to X." @@ -138908,7 +138335,7 @@ "qname": "sklearn.linear_model._logistic.LogisticRegression.fit.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,) default=None", "description": "Array of weights that are assigned to individual samples.\nIf not provided, then each sample is given unit weight.\n\n.. versionadded:: 0.17\n *sample_weight* support to LogisticRegression." @@ -138920,7 +138347,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the model according to the given training data.", "docstring": "\n Fit the model according to the given training data.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training vector, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n y : array-like of shape (n_samples,)\n Target vector relative to X.\n\n sample_weight : array-like of shape (n_samples,) default=None\n Array of weights that are assigned to individual samples.\n If not provided, then each sample is given unit weight.\n\n .. 
versionadded:: 0.17\n *sample_weight* support to LogisticRegression.\n\n Returns\n -------\n self\n Fitted estimator.\n\n Notes\n -----\n The SAGA solver supports both float64 and float32 bit arrays.\n " }, @@ -138936,7 +138363,7 @@ "qname": "sklearn.linear_model._logistic.LogisticRegression.predict_log_proba.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -138949,7 +138376,7 @@ "qname": "sklearn.linear_model._logistic.LogisticRegression.predict_log_proba.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Vector to be scored, where `n_samples` is the number of samples and\n`n_features` is the number of features." @@ -138961,7 +138388,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict logarithm of probability estimates.\n\nThe returned estimates for all classes are ordered by the\nlabel of classes.", "docstring": "\n Predict logarithm of probability estimates.\n\n The returned estimates for all classes are ordered by the\n label of classes.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Vector to be scored, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n Returns\n -------\n T : array-like of shape (n_samples, n_classes)\n Returns the log-probability of the sample for each class in the\n model, where classes are ordered as they are in ``self.classes_``.\n " }, @@ -138977,7 +138404,7 @@ "qname": "sklearn.linear_model._logistic.LogisticRegression.predict_proba.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -138990,7 +138417,7 @@ "qname": "sklearn.linear_model._logistic.LogisticRegression.predict_proba.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Vector to be scored, where `n_samples` is the number of samples and\n`n_features` is the number of features." 
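`predict_proba` and `predict_log_proba` above are documented to return estimates ordered by `self.classes_`, with the latter being the logarithm of the former. A brief hedged check of that relationship on toy data (again, the data and helpers are illustrative, not part of the annotations):

    # Illustrative sketch only: predict_log_proba is the elementwise log of predict_proba.
    import numpy as np
    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegression

    X, y = make_classification(n_samples=150, n_features=10, random_state=0)
    clf = LogisticRegression(max_iter=1000).fit(X, y)

    proba = clf.predict_proba(X[:5])          # shape (5, n_classes), ordered as clf.classes_
    log_proba = clf.predict_log_proba(X[:5])
    assert np.allclose(np.log(proba), log_proba)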
@@ -139002,7 +138429,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Probability estimates.\n\nThe returned estimates for all classes are ordered by the\nlabel of classes.\n\nFor a multi_class problem, if multi_class is set to be \"multinomial\"\nthe softmax function is used to find the predicted probability of\neach class.\nElse use a one-vs-rest approach, i.e calculate the probability\nof each class assuming it to be positive using the logistic function.\nand normalize these values across all the classes.", "docstring": "\n Probability estimates.\n\n The returned estimates for all classes are ordered by the\n label of classes.\n\n For a multi_class problem, if multi_class is set to be \"multinomial\"\n the softmax function is used to find the predicted probability of\n each class.\n Else use a one-vs-rest approach, i.e calculate the probability\n of each class assuming it to be positive using the logistic function.\n and normalize these values across all the classes.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Vector to be scored, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n Returns\n -------\n T : array-like of shape (n_samples, n_classes)\n Returns the probability of the sample for each class in the model,\n where classes are ordered as they are in ``self.classes_``.\n " }, @@ -139018,7 +138445,7 @@ "qname": "sklearn.linear_model._logistic.LogisticRegressionCV.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -139031,7 +138458,7 @@ "qname": "sklearn.linear_model._logistic.LogisticRegressionCV.__init__.Cs", "default_value": "10", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or list of floats, default=10", "description": "Each of the values in Cs describes the inverse of regularization\nstrength. If Cs is as an int, then a grid of Cs values are chosen\nin a logarithmic scale between 1e-4 and 1e4.\nLike in support vector machines, smaller values specify stronger\nregularization." @@ -139056,7 +138483,7 @@ "qname": "sklearn.linear_model._logistic.LogisticRegressionCV.__init__.fit_intercept", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Specifies if a constant (a.k.a. bias or intercept) should be\nadded to the decision function." @@ -139072,7 +138499,7 @@ "qname": "sklearn.linear_model._logistic.LogisticRegressionCV.__init__.cv", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or cross-validation generator, default=None", "description": "The default cross-validation generator used is Stratified K-Folds.\nIf an integer is provided, then it is the number of folds used.\nSee the module :mod:`sklearn.model_selection` module for the\nlist of possible cross-validation objects.\n\n.. versionchanged:: 0.22\n ``cv`` default value if None changed from 3-fold to 5-fold." @@ -139097,7 +138524,7 @@ "qname": "sklearn.linear_model._logistic.LogisticRegressionCV.__init__.dual", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Dual or primal formulation. Dual formulation is only implemented for\nl2 penalty with liblinear solver. 
Prefer dual=False when\nn_samples > n_features." @@ -139113,14 +138540,14 @@ "qname": "sklearn.linear_model._logistic.LogisticRegressionCV.__init__.penalty", "default_value": "'l2'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'l1', 'l2', 'elasticnet'}, default='l2'", "description": "Specify the norm of the penalty:\n\n- `'l2'`: add a L2 penalty term (used by default);\n- `'l1'`: add a L1 penalty term;\n- `'elasticnet'`: both L1 and L2 penalty terms are added.\n\n.. warning::\n Some penalties may not work with some solvers. See the parameter\n `solver` below, to know the compatibility between the penalty and\n solver." }, "type": { "kind": "EnumType", - "values": ["l2", "elasticnet", "l1"] + "values": ["l2", "l1", "elasticnet"] } }, { @@ -139129,7 +138556,7 @@ "qname": "sklearn.linear_model._logistic.LogisticRegressionCV.__init__.scoring", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str or callable, default=None", "description": "A string (see model evaluation documentation) or\na scorer callable object / function with signature\n``scorer(estimator, X, y)``. For a list of scoring functions\nthat can be used, look at :mod:`sklearn.metrics`. The\ndefault scoring option used is 'accuracy'." @@ -139154,14 +138581,14 @@ "qname": "sklearn.linear_model._logistic.LogisticRegressionCV.__init__.solver", "default_value": "'lbfgs'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'}, default='lbfgs'", "description": "Algorithm to use in the optimization problem. Default is 'lbfgs'.\nTo choose a solver, you might want to consider the following aspects:\n\n - For small datasets, 'liblinear' is a good choice, whereas 'sag'\n and 'saga' are faster for large ones;\n - For multiclass problems, only 'newton-cg', 'sag', 'saga' and\n 'lbfgs' handle multinomial loss;\n - 'liblinear' might be slower in :class:`LogisticRegressionCV`\n because it does not handle warm-starting. 'liblinear' is\n limited to one-versus-rest schemes.\n\n.. warning::\n The choice of the algorithm depends on the penalty chosen:\n\n - 'newton-cg' - ['l2']\n - 'lbfgs' - ['l2']\n - 'liblinear' - ['l1', 'l2']\n - 'sag' - ['l2']\n - 'saga' - ['elasticnet', 'l1', 'l2']\n\n.. note::\n 'sag' and 'saga' fast convergence is only guaranteed on features\n with approximately the same scale. You can preprocess the data with\n a scaler from :mod:`sklearn.preprocessing`.\n\n.. versionadded:: 0.17\n Stochastic Average Gradient descent solver.\n.. versionadded:: 0.19\n SAGA solver." }, "type": { "kind": "EnumType", - "values": ["lbfgs", "newton-cg", "saga", "sag", "liblinear"] + "values": ["saga", "newton-cg", "sag", "liblinear", "lbfgs"] } }, { @@ -139170,7 +138597,7 @@ "qname": "sklearn.linear_model._logistic.LogisticRegressionCV.__init__.tol", "default_value": "0.0001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-4", "description": "Tolerance for stopping criteria." @@ -139186,7 +138613,7 @@ "qname": "sklearn.linear_model._logistic.LogisticRegressionCV.__init__.max_iter", "default_value": "100", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=100", "description": "Maximum number of iterations of the optimization algorithm." 
@@ -139202,7 +138629,7 @@ "qname": "sklearn.linear_model._logistic.LogisticRegressionCV.__init__.class_weight", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "dict or 'balanced', default=None", "description": "Weights associated with classes in the form ``{class_label: weight}``.\nIf not given, all classes are supposed to have weight one.\n\nThe \"balanced\" mode uses the values of y to automatically adjust\nweights inversely proportional to class frequencies in the input data\nas ``n_samples / (n_classes * np.bincount(y))``.\n\nNote that these weights will be multiplied with sample_weight (passed\nthrough the fit method) if sample_weight is specified.\n\n.. versionadded:: 0.17\n class_weight == 'balanced'" @@ -139227,7 +138654,7 @@ "qname": "sklearn.linear_model._logistic.LogisticRegressionCV.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Number of CPU cores used during the cross-validation loop.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details." @@ -139243,7 +138670,7 @@ "qname": "sklearn.linear_model._logistic.LogisticRegressionCV.__init__.verbose", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=0", "description": "For the 'liblinear', 'sag' and 'lbfgs' solvers set verbose to any\npositive number for verbosity." @@ -139259,7 +138686,7 @@ "qname": "sklearn.linear_model._logistic.LogisticRegressionCV.__init__.refit", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "If set to True, the scores are averaged across all folds, and the\ncoefs and the C that corresponds to the best score is taken, and a\nfinal refit is done using these parameters.\nOtherwise the coefs, intercepts and C that correspond to the\nbest scores across folds are averaged." @@ -139275,7 +138702,7 @@ "qname": "sklearn.linear_model._logistic.LogisticRegressionCV.__init__.intercept_scaling", "default_value": "1.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1", "description": "Useful only when the solver 'liblinear' is used\nand self.fit_intercept is set to True. In this case, x becomes\n[x, self.intercept_scaling],\ni.e. a \"synthetic\" feature with constant value equal to\nintercept_scaling is appended to the instance vector.\nThe intercept becomes ``intercept_scaling * synthetic_feature_weight``.\n\nNote! the synthetic feature weight is subject to l1/l2 regularization\nas all other features.\nTo lessen the effect of regularization on synthetic feature weight\n(and therefore on the intercept) intercept_scaling has to be increased." @@ -139291,7 +138718,7 @@ "qname": "sklearn.linear_model._logistic.LogisticRegressionCV.__init__.multi_class", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'auto, 'ovr', 'multinomial'}, default='auto'", "description": "If the option chosen is 'ovr', then a binary problem is fit for each\nlabel. For 'multinomial' the loss minimised is the multinomial loss fit\nacross the entire probability distribution, *even when the data is\nbinary*. 
'multinomial' is unavailable when solver='liblinear'.\n'auto' selects 'ovr' if the data is binary, or if solver='liblinear',\nand otherwise selects 'multinomial'.\n\n.. versionadded:: 0.18\n Stochastic Average Gradient descent solver for 'multinomial' case.\n.. versionchanged:: 0.22\n Default changed from 'ovr' to 'auto' in 0.22." @@ -139307,7 +138734,7 @@ "qname": "sklearn.linear_model._logistic.LogisticRegressionCV.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance, default=None", "description": "Used when `solver='sag'`, 'saga' or 'liblinear' to shuffle the data.\nNote that this only applies to the solver and not the cross-validation\ngenerator. See :term:`Glossary ` for details." @@ -139332,7 +138759,7 @@ "qname": "sklearn.linear_model._logistic.LogisticRegressionCV.__init__.l1_ratios", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "list of float, default=None", "description": "The list of Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``.\nOnly used if ``penalty='elasticnet'``. A value of 0 is equivalent to\nusing ``penalty='l2'``, while 1 is equivalent to using\n``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a combination\nof L1 and L2." @@ -139344,7 +138771,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -139385,7 +138812,7 @@ "qname": "sklearn.linear_model._logistic.LogisticRegressionCV.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -139398,7 +138825,7 @@ "qname": "sklearn.linear_model._logistic.LogisticRegressionCV.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Training vector, where `n_samples` is the number of samples and\n`n_features` is the number of features." @@ -139423,7 +138850,7 @@ "qname": "sklearn.linear_model._logistic.LogisticRegressionCV.fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "Target vector relative to X." @@ -139439,7 +138866,7 @@ "qname": "sklearn.linear_model._logistic.LogisticRegressionCV.fit.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,) default=None", "description": "Array of weights that are assigned to individual samples.\nIf not provided, then each sample is given unit weight." 
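Given the `LogisticRegressionCV` parameters annotated above (`Cs`, `cv`, `penalty`, `solver`, `l1_ratios`, `random_state`, and the `fit` signature), the following is a hedged sketch of a cross-validated elastic-net fit; grid sizes and data are arbitrary for illustration, and attributes such as `C_` and `l1_ratio_` come from scikit-learn itself rather than from these hunks.

    # Illustrative sketch only: cross-validated elastic-net logistic regression.
    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegressionCV

    X, y = make_classification(n_samples=300, n_features=20, random_state=0)
    clf = LogisticRegressionCV(
        Cs=5,                    # 5 C values on a log scale between 1e-4 and 1e4
        cv=3,                    # 3 folds; Stratified K-Folds is the default generator
        penalty="elasticnet",
        solver="saga",           # the only solver supporting 'elasticnet'
        l1_ratios=[0.2, 0.8],
        max_iter=2000,
        random_state=0,
    )
    clf.fit(X, y)
    print(clf.C_, clf.l1_ratio_)  # best C and l1_ratio per class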
@@ -139451,7 +138878,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the model according to the given training data.", "docstring": "Fit the model according to the given training data.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training vector, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n y : array-like of shape (n_samples,)\n Target vector relative to X.\n\n sample_weight : array-like of shape (n_samples,) default=None\n Array of weights that are assigned to individual samples.\n If not provided, then each sample is given unit weight.\n\n Returns\n -------\n self : object\n Fitted LogisticRegressionCV estimator.\n " }, @@ -139467,7 +138894,7 @@ "qname": "sklearn.linear_model._logistic.LogisticRegressionCV.score.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -139480,7 +138907,7 @@ "qname": "sklearn.linear_model._logistic.LogisticRegressionCV.score.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Test samples." @@ -139496,7 +138923,7 @@ "qname": "sklearn.linear_model._logistic.LogisticRegressionCV.score.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "True labels for X." @@ -139512,7 +138939,7 @@ "qname": "sklearn.linear_model._logistic.LogisticRegressionCV.score.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,), default=None", "description": "Sample weights." @@ -139524,7 +138951,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Score using the `scoring` option on the given test data and labels.", "docstring": "Score using the `scoring` option on the given test data and labels.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Test samples.\n\n y : array-like of shape (n_samples,)\n True labels for X.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights.\n\n Returns\n -------\n score : float\n Score of self.predict(X) wrt. 
y.\n " }, @@ -139868,7 +139295,7 @@ }, "type": { "kind": "EnumType", - "values": ["lbfgs", "newton-cg", "saga", "sag", "liblinear"] + "values": ["saga", "newton-cg", "sag", "liblinear", "lbfgs"] } }, { @@ -139884,7 +139311,7 @@ }, "type": { "kind": "EnumType", - "values": ["l2", "elasticnet", "l1"] + "values": ["l2", "l1", "elasticnet"] } }, { @@ -139932,7 +139359,7 @@ }, "type": { "kind": "EnumType", - "values": ["multinomial", "ovr", "auto"] + "values": ["auto", "ovr", "multinomial"] } }, { @@ -140179,7 +139606,7 @@ }, "type": { "kind": "EnumType", - "values": ["lbfgs", "newton-cg", "saga", "sag", "liblinear"] + "values": ["saga", "newton-cg", "sag", "liblinear", "lbfgs"] } }, { @@ -140252,7 +139679,7 @@ }, "type": { "kind": "EnumType", - "values": ["l2", "elasticnet", "l1"] + "values": ["l2", "l1", "elasticnet"] } }, { @@ -140284,7 +139711,7 @@ }, "type": { "kind": "EnumType", - "values": ["multinomial", "ovr", "auto"] + "values": ["auto", "ovr", "multinomial"] } }, { @@ -140410,7 +139837,7 @@ "qname": "sklearn.linear_model._omp.OrthogonalMatchingPursuit.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -140423,7 +139850,7 @@ "qname": "sklearn.linear_model._omp.OrthogonalMatchingPursuit.__init__.n_nonzero_coefs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Desired number of non-zero entries in the solution. If None (by\ndefault) this value is set to 10% of n_features." @@ -140439,7 +139866,7 @@ "qname": "sklearn.linear_model._omp.OrthogonalMatchingPursuit.__init__.tol", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=None", "description": "Maximum norm of the residual. If not None, overrides n_nonzero_coefs." @@ -140455,7 +139882,7 @@ "qname": "sklearn.linear_model._omp.OrthogonalMatchingPursuit.__init__.fit_intercept", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether to calculate the intercept for this model. If set\nto false, no intercept will be used in calculations\n(i.e. data is expected to be centered)." @@ -140471,7 +139898,7 @@ "qname": "sklearn.linear_model._omp.OrthogonalMatchingPursuit.__init__.normalize", "default_value": "'deprecated'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "This parameter is ignored when ``fit_intercept`` is set to False.\nIf True, the regressors X will be normalized before regression by\nsubtracting the mean and dividing by the l2-norm.\nIf you wish to standardize, please use\n:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``\non an estimator with ``normalize=False``.\n\n.. deprecated:: 1.0\n ``normalize`` was deprecated in version 1.0. It will default\n to False in 1.2 and be removed in 1.4." @@ -140487,7 +139914,7 @@ "qname": "sklearn.linear_model._omp.OrthogonalMatchingPursuit.__init__.precompute", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "'auto' or bool, default='auto'", "description": "Whether to use a precomputed Gram and Xy matrix to speed up\ncalculations. Improves performance when :term:`n_targets` or\n:term:`n_samples` is very large. 
Note that if you already have such\nmatrices, you can pass them directly to the fit method." @@ -140508,7 +139935,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -140524,7 +139951,7 @@ "qname": "sklearn.linear_model._omp.OrthogonalMatchingPursuit.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -140537,7 +139964,7 @@ "qname": "sklearn.linear_model._omp.OrthogonalMatchingPursuit.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Training data." @@ -140553,7 +139980,7 @@ "qname": "sklearn.linear_model._omp.OrthogonalMatchingPursuit.fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,) or (n_samples, n_targets)", "description": "Target values. Will be cast to X's dtype if necessary." @@ -140565,7 +139992,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the model using X, y as training data.", "docstring": "Fit the model using X, y as training data.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data.\n\n y : array-like of shape (n_samples,) or (n_samples, n_targets)\n Target values. Will be cast to X's dtype if necessary.\n\n Returns\n -------\n self : object\n Returns an instance of self.\n " }, @@ -140581,7 +140008,7 @@ "qname": "sklearn.linear_model._omp.OrthogonalMatchingPursuitCV.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -140594,7 +140021,7 @@ "qname": "sklearn.linear_model._omp.OrthogonalMatchingPursuitCV.__init__.copy", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether the design matrix X must be copied by the algorithm. A false\nvalue is only helpful if X is already Fortran-ordered, otherwise a\ncopy is made anyway." @@ -140610,7 +140037,7 @@ "qname": "sklearn.linear_model._omp.OrthogonalMatchingPursuitCV.__init__.fit_intercept", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether to calculate the intercept for this model. If set\nto false, no intercept will be used in calculations\n(i.e. data is expected to be centered)." @@ -140626,7 +140053,7 @@ "qname": "sklearn.linear_model._omp.OrthogonalMatchingPursuitCV.__init__.normalize", "default_value": "'deprecated'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "This parameter is ignored when ``fit_intercept`` is set to False.\nIf True, the regressors X will be normalized before regression by\nsubtracting the mean and dividing by the l2-norm.\nIf you wish to standardize, please use\n:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``\non an estimator with ``normalize=False``.\n\n.. deprecated:: 1.0\n ``normalize`` was deprecated in version 1.0. It will default\n to False in 1.2 and be removed in 1.4." 
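The `OrthogonalMatchingPursuit` hunks above document that `n_nonzero_coefs` caps the number of non-zero entries and that a non-`None` `tol` overrides it. A hedged sketch of that behaviour (toy sparse regression data, illustrative only):

    # Illustrative sketch only: OMP with the n_nonzero_coefs/tol behaviour noted above.
    from sklearn.datasets import make_regression
    from sklearn.linear_model import OrthogonalMatchingPursuit

    X, y = make_regression(n_samples=100, n_features=30, n_informative=5,
                           noise=0.1, random_state=0)
    omp = OrthogonalMatchingPursuit(n_nonzero_coefs=5, tol=None)  # tol, if set, would take precedence
    omp.fit(X, y)
    print((omp.coef_ != 0).sum())  # at most 5 non-zero coefficients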
@@ -140642,7 +140069,7 @@ "qname": "sklearn.linear_model._omp.OrthogonalMatchingPursuitCV.__init__.max_iter", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Maximum numbers of iterations to perform, therefore maximum features\nto include. 10% of ``n_features`` but at least 5 if available." @@ -140658,7 +140085,7 @@ "qname": "sklearn.linear_model._omp.OrthogonalMatchingPursuitCV.__init__.cv", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, cross-validation generator or iterable, default=None", "description": "Determines the cross-validation splitting strategy.\nPossible inputs for cv are:\n\n- None, to use the default 5-fold cross-validation,\n- integer, to specify the number of folds.\n- :term:`CV splitter`,\n- An iterable yielding (train, test) splits as arrays of indices.\n\nFor integer/None inputs, :class:`KFold` is used.\n\nRefer :ref:`User Guide ` for the various\ncross-validation strategies that can be used here.\n\n.. versionchanged:: 0.22\n ``cv`` default value if None changed from 3-fold to 5-fold." @@ -140687,7 +140114,7 @@ "qname": "sklearn.linear_model._omp.OrthogonalMatchingPursuitCV.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Number of CPUs to use during the cross validation.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details." @@ -140703,7 +140130,7 @@ "qname": "sklearn.linear_model._omp.OrthogonalMatchingPursuitCV.__init__.verbose", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool or int, default=False", "description": "Sets the verbosity amount." @@ -140724,7 +140151,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -140740,7 +140167,7 @@ "qname": "sklearn.linear_model._omp.OrthogonalMatchingPursuitCV.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -140753,7 +140180,7 @@ "qname": "sklearn.linear_model._omp.OrthogonalMatchingPursuitCV.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Training data." @@ -140769,7 +140196,7 @@ "qname": "sklearn.linear_model._omp.OrthogonalMatchingPursuitCV.fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "Target values. Will be cast to X's dtype if necessary." @@ -140781,7 +140208,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the model using X, y as training data.", "docstring": "Fit the model using X, y as training data.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data.\n\n y : array-like of shape (n_samples,)\n Target values. 
Will be cast to X's dtype if necessary.\n\n Returns\n -------\n self : object\n Returns an instance of self.\n " }, @@ -141490,7 +140917,7 @@ "qname": "sklearn.linear_model._passive_aggressive.PassiveAggressiveClassifier.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -141503,7 +140930,7 @@ "qname": "sklearn.linear_model._passive_aggressive.PassiveAggressiveClassifier.__init__.C", "default_value": "1.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1.0", "description": "Maximum step size (regularization). Defaults to 1.0." @@ -141519,7 +140946,7 @@ "qname": "sklearn.linear_model._passive_aggressive.PassiveAggressiveClassifier.__init__.fit_intercept", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether the intercept should be estimated or not. If False, the\ndata is assumed to be already centered." @@ -141535,7 +140962,7 @@ "qname": "sklearn.linear_model._passive_aggressive.PassiveAggressiveClassifier.__init__.max_iter", "default_value": "1000", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=1000", "description": "The maximum number of passes over the training data (aka epochs).\nIt only impacts the behavior in the ``fit`` method, and not the\n:meth:`partial_fit` method.\n\n.. versionadded:: 0.19" @@ -141551,7 +140978,7 @@ "qname": "sklearn.linear_model._passive_aggressive.PassiveAggressiveClassifier.__init__.tol", "default_value": "0.001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float or None, default=1e-3", "description": "The stopping criterion. If it is not None, the iterations will stop\nwhen (loss > previous_loss - tol).\n\n.. versionadded:: 0.19" @@ -141576,7 +141003,7 @@ "qname": "sklearn.linear_model._passive_aggressive.PassiveAggressiveClassifier.__init__.early_stopping", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Whether to use early stopping to terminate training when validation.\nscore is not improving. If set to True, it will automatically set aside\na stratified fraction of training data as validation and terminate\ntraining when validation score is not improving by at least tol for\nn_iter_no_change consecutive epochs.\n\n.. versionadded:: 0.20" @@ -141592,7 +141019,7 @@ "qname": "sklearn.linear_model._passive_aggressive.PassiveAggressiveClassifier.__init__.validation_fraction", "default_value": "0.1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.1", "description": "The proportion of training data to set aside as validation set for\nearly stopping. Must be between 0 and 1.\nOnly used if early_stopping is True.\n\n.. versionadded:: 0.20" @@ -141608,7 +141035,7 @@ "qname": "sklearn.linear_model._passive_aggressive.PassiveAggressiveClassifier.__init__.n_iter_no_change", "default_value": "5", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=5", "description": "Number of iterations with no improvement to wait before early stopping.\n\n.. 
versionadded:: 0.20" @@ -141624,7 +141051,7 @@ "qname": "sklearn.linear_model._passive_aggressive.PassiveAggressiveClassifier.__init__.shuffle", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether or not the training data should be shuffled after each epoch." @@ -141640,7 +141067,7 @@ "qname": "sklearn.linear_model._passive_aggressive.PassiveAggressiveClassifier.__init__.verbose", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=0", "description": "The verbosity level." @@ -141656,7 +141083,7 @@ "qname": "sklearn.linear_model._passive_aggressive.PassiveAggressiveClassifier.__init__.loss", "default_value": "'hinge'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str, default=\"hinge\"", "description": "The loss function to be used:\nhinge: equivalent to PA-I in the reference paper.\nsquared_hinge: equivalent to PA-II in the reference paper." @@ -141672,7 +141099,7 @@ "qname": "sklearn.linear_model._passive_aggressive.PassiveAggressiveClassifier.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or None, default=None", "description": "The number of CPUs to use to do the OVA (One Versus All, for\nmulti-class problems) computation.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details." @@ -141697,7 +141124,7 @@ "qname": "sklearn.linear_model._passive_aggressive.PassiveAggressiveClassifier.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance, default=None", "description": "Used to shuffle the training data, when ``shuffle`` is set to\n``True``. Pass an int for reproducible output across multiple\nfunction calls.\nSee :term:`Glossary `." @@ -141722,7 +141149,7 @@ "qname": "sklearn.linear_model._passive_aggressive.PassiveAggressiveClassifier.__init__.warm_start", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "When set to True, reuse the solution of the previous call to fit as\ninitialization, otherwise, just erase the previous solution.\nSee :term:`the Glossary `.\n\nRepeatedly calling fit or partial_fit when warm_start is True can\nresult in a different solution than when calling fit a single time\nbecause of the way the data is shuffled." @@ -141738,7 +141165,7 @@ "qname": "sklearn.linear_model._passive_aggressive.PassiveAggressiveClassifier.__init__.class_weight", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "dict, {class_label: weight} or \"balanced\" or None, default=None", "description": "Preset for the class_weight fit parameter.\n\nWeights associated with classes. If not given, all classes\nare supposed to have weight one.\n\nThe \"balanced\" mode uses the values of y to automatically adjust\nweights inversely proportional to class frequencies in the input data\nas ``n_samples / (n_classes * np.bincount(y))``.\n\n.. versionadded:: 0.17\n parameter *class_weight* to automatically weight samples." 
@@ -141771,7 +141198,7 @@ "qname": "sklearn.linear_model._passive_aggressive.PassiveAggressiveClassifier.__init__.average", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool or int, default=False", "description": "When set to True, computes the averaged SGD weights and stores the\nresult in the ``coef_`` attribute. If set to an int greater than 1,\naveraging will begin once the total number of samples seen reaches\naverage. So average=10 will begin averaging after seeing 10 samples.\n\n.. versionadded:: 0.19\n parameter *average* to use weights averaging in SGD." @@ -141792,7 +141219,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -141808,7 +141235,7 @@ "qname": "sklearn.linear_model._passive_aggressive.PassiveAggressiveClassifier.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -141821,7 +141248,7 @@ "qname": "sklearn.linear_model._passive_aggressive.PassiveAggressiveClassifier.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Training data." @@ -141846,7 +141273,7 @@ "qname": "sklearn.linear_model._passive_aggressive.PassiveAggressiveClassifier.fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "Target values." @@ -141862,7 +141289,7 @@ "qname": "sklearn.linear_model._passive_aggressive.PassiveAggressiveClassifier.fit.coef_init", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray of shape (n_classes, n_features)", "description": "The initial coefficients to warm-start the optimization." @@ -141878,7 +141305,7 @@ "qname": "sklearn.linear_model._passive_aggressive.PassiveAggressiveClassifier.fit.intercept_init", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray of shape (n_classes,)", "description": "The initial intercept to warm-start the optimization." 
@@ -141890,7 +141317,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit linear model with Passive Aggressive algorithm.", "docstring": "Fit linear model with Passive Aggressive algorithm.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training data.\n\n y : array-like of shape (n_samples,)\n Target values.\n\n coef_init : ndarray of shape (n_classes, n_features)\n The initial coefficients to warm-start the optimization.\n\n intercept_init : ndarray of shape (n_classes,)\n The initial intercept to warm-start the optimization.\n\n Returns\n -------\n self : object\n Fitted estimator.\n " }, @@ -141906,7 +141333,7 @@ "qname": "sklearn.linear_model._passive_aggressive.PassiveAggressiveClassifier.partial_fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -141919,7 +141346,7 @@ "qname": "sklearn.linear_model._passive_aggressive.PassiveAggressiveClassifier.partial_fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Subset of the training data." @@ -141944,7 +141371,7 @@ "qname": "sklearn.linear_model._passive_aggressive.PassiveAggressiveClassifier.partial_fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "Subset of the target values." @@ -141960,7 +141387,7 @@ "qname": "sklearn.linear_model._passive_aggressive.PassiveAggressiveClassifier.partial_fit.classes", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray of shape (n_classes,)", "description": "Classes across all calls to partial_fit.\nCan be obtained by via `np.unique(y_all)`, where y_all is the\ntarget vector of the entire dataset.\nThis argument is required for the first call to partial_fit\nand can be omitted in the subsequent calls.\nNote that y doesn't need to contain all labels in `classes`." 
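A hedged sketch of the partial_fit contract described above (synthetic data, not part of the diff): the classes argument is required only on the first call.

# Illustrative sketch only: `classes` must be passed on the first partial_fit
# call and may be omitted afterwards, as the docstring above states.
import numpy as np
from sklearn.linear_model import PassiveAggressiveClassifier

rng = np.random.RandomState(0)
X = rng.randn(100, 5)
y = rng.randint(0, 3, size=100)

clf = PassiveAggressiveClassifier(random_state=0)
clf.partial_fit(X[:50], y[:50], classes=np.unique(y))  # first call: classes required
clf.partial_fit(X[50:], y[50:])                        # subsequent calls: omitted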
@@ -141972,7 +141399,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit linear model with Passive Aggressive algorithm.", "docstring": "Fit linear model with Passive Aggressive algorithm.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Subset of the training data.\n\n y : array-like of shape (n_samples,)\n Subset of the target values.\n\n classes : ndarray of shape (n_classes,)\n Classes across all calls to partial_fit.\n Can be obtained by via `np.unique(y_all)`, where y_all is the\n target vector of the entire dataset.\n This argument is required for the first call to partial_fit\n and can be omitted in the subsequent calls.\n Note that y doesn't need to contain all labels in `classes`.\n\n Returns\n -------\n self : object\n Fitted estimator.\n " }, @@ -141988,7 +141415,7 @@ "qname": "sklearn.linear_model._passive_aggressive.PassiveAggressiveRegressor.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -142001,7 +141428,7 @@ "qname": "sklearn.linear_model._passive_aggressive.PassiveAggressiveRegressor.__init__.C", "default_value": "1.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1.0", "description": "Maximum step size (regularization). Defaults to 1.0." @@ -142017,7 +141444,7 @@ "qname": "sklearn.linear_model._passive_aggressive.PassiveAggressiveRegressor.__init__.fit_intercept", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether the intercept should be estimated or not. If False, the\ndata is assumed to be already centered. Defaults to True." @@ -142033,7 +141460,7 @@ "qname": "sklearn.linear_model._passive_aggressive.PassiveAggressiveRegressor.__init__.max_iter", "default_value": "1000", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=1000", "description": "The maximum number of passes over the training data (aka epochs).\nIt only impacts the behavior in the ``fit`` method, and not the\n:meth:`partial_fit` method.\n\n.. versionadded:: 0.19" @@ -142049,7 +141476,7 @@ "qname": "sklearn.linear_model._passive_aggressive.PassiveAggressiveRegressor.__init__.tol", "default_value": "0.001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float or None, default=1e-3", "description": "The stopping criterion. If it is not None, the iterations will stop\nwhen (loss > previous_loss - tol).\n\n.. versionadded:: 0.19" @@ -142074,7 +141501,7 @@ "qname": "sklearn.linear_model._passive_aggressive.PassiveAggressiveRegressor.__init__.early_stopping", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Whether to use early stopping to terminate training when validation.\nscore is not improving. If set to True, it will automatically set aside\na fraction of training data as validation and terminate\ntraining when validation score is not improving by at least tol for\nn_iter_no_change consecutive epochs.\n\n.. 
versionadded:: 0.20" @@ -142090,7 +141517,7 @@ "qname": "sklearn.linear_model._passive_aggressive.PassiveAggressiveRegressor.__init__.validation_fraction", "default_value": "0.1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.1", "description": "The proportion of training data to set aside as validation set for\nearly stopping. Must be between 0 and 1.\nOnly used if early_stopping is True.\n\n.. versionadded:: 0.20" @@ -142106,7 +141533,7 @@ "qname": "sklearn.linear_model._passive_aggressive.PassiveAggressiveRegressor.__init__.n_iter_no_change", "default_value": "5", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=5", "description": "Number of iterations with no improvement to wait before early stopping.\n\n.. versionadded:: 0.20" @@ -142122,7 +141549,7 @@ "qname": "sklearn.linear_model._passive_aggressive.PassiveAggressiveRegressor.__init__.shuffle", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether or not the training data should be shuffled after each epoch." @@ -142138,7 +141565,7 @@ "qname": "sklearn.linear_model._passive_aggressive.PassiveAggressiveRegressor.__init__.verbose", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=0", "description": "The verbosity level." @@ -142154,7 +141581,7 @@ "qname": "sklearn.linear_model._passive_aggressive.PassiveAggressiveRegressor.__init__.loss", "default_value": "'epsilon_insensitive'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str, default=\"epsilon_insensitive\"", "description": "The loss function to be used:\nepsilon_insensitive: equivalent to PA-I in the reference paper.\nsquared_epsilon_insensitive: equivalent to PA-II in the reference\npaper." @@ -142170,7 +141597,7 @@ "qname": "sklearn.linear_model._passive_aggressive.PassiveAggressiveRegressor.__init__.epsilon", "default_value": "DEFAULT_EPSILON", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.1", "description": "If the difference between the current prediction and the correct label\nis below this threshold, the model is not updated." @@ -142186,7 +141613,7 @@ "qname": "sklearn.linear_model._passive_aggressive.PassiveAggressiveRegressor.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance, default=None", "description": "Used to shuffle the training data, when ``shuffle`` is set to\n``True``. Pass an int for reproducible output across multiple\nfunction calls.\nSee :term:`Glossary `." @@ -142211,7 +141638,7 @@ "qname": "sklearn.linear_model._passive_aggressive.PassiveAggressiveRegressor.__init__.warm_start", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "When set to True, reuse the solution of the previous call to fit as\ninitialization, otherwise, just erase the previous solution.\nSee :term:`the Glossary `.\n\nRepeatedly calling fit or partial_fit when warm_start is True can\nresult in a different solution than when calling fit a single time\nbecause of the way the data is shuffled." 
@@ -142227,7 +141654,7 @@ "qname": "sklearn.linear_model._passive_aggressive.PassiveAggressiveRegressor.__init__.average", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool or int, default=False", "description": "When set to True, computes the averaged SGD weights and stores the\nresult in the ``coef_`` attribute. If set to an int greater than 1,\naveraging will begin once the total number of samples seen reaches\naverage. So average=10 will begin averaging after seeing 10 samples.\n\n.. versionadded:: 0.19\n parameter *average* to use weights averaging in SGD." @@ -142248,7 +141675,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -142264,7 +141691,7 @@ "qname": "sklearn.linear_model._passive_aggressive.PassiveAggressiveRegressor.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -142277,7 +141704,7 @@ "qname": "sklearn.linear_model._passive_aggressive.PassiveAggressiveRegressor.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Training data." @@ -142302,7 +141729,7 @@ "qname": "sklearn.linear_model._passive_aggressive.PassiveAggressiveRegressor.fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "numpy array of shape [n_samples]", "description": "Target values." @@ -142318,7 +141745,7 @@ "qname": "sklearn.linear_model._passive_aggressive.PassiveAggressiveRegressor.fit.coef_init", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array, shape = [n_features]", "description": "The initial coefficients to warm-start the optimization." @@ -142343,7 +141770,7 @@ "qname": "sklearn.linear_model._passive_aggressive.PassiveAggressiveRegressor.fit.intercept_init", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array, shape = [1]", "description": "The initial intercept to warm-start the optimization." 
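A hedged sketch of the warm-start arguments of PassiveAggressiveRegressor.fit documented above (synthetic data; shapes follow the docstring):

# Illustrative sketch only: coef_init has shape (n_features,) and
# intercept_init has shape (1,), per the docstrings above.
import numpy as np
from sklearn.linear_model import PassiveAggressiveRegressor

rng = np.random.RandomState(0)
X = rng.randn(100, 4)
y = X @ np.array([1.0, -2.0, 0.5, 3.0]) + 0.1 * rng.randn(100)

reg = PassiveAggressiveRegressor(max_iter=1000, tol=1e-3, random_state=0)
reg.fit(X, y, coef_init=np.zeros(X.shape[1]), intercept_init=np.zeros(1))
print(reg.coef_, reg.intercept_)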
@@ -142364,7 +141791,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit linear model with Passive Aggressive algorithm.", "docstring": "Fit linear model with Passive Aggressive algorithm.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training data.\n\n y : numpy array of shape [n_samples]\n Target values.\n\n coef_init : array, shape = [n_features]\n The initial coefficients to warm-start the optimization.\n\n intercept_init : array, shape = [1]\n The initial intercept to warm-start the optimization.\n\n Returns\n -------\n self : object\n Fitted estimator.\n " }, @@ -142380,7 +141807,7 @@ "qname": "sklearn.linear_model._passive_aggressive.PassiveAggressiveRegressor.partial_fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -142393,7 +141820,7 @@ "qname": "sklearn.linear_model._passive_aggressive.PassiveAggressiveRegressor.partial_fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Subset of training data." @@ -142418,7 +141845,7 @@ "qname": "sklearn.linear_model._passive_aggressive.PassiveAggressiveRegressor.partial_fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "numpy array of shape [n_samples]", "description": "Subset of target values." @@ -142430,7 +141857,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit linear model with Passive Aggressive algorithm.", "docstring": "Fit linear model with Passive Aggressive algorithm.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Subset of training data.\n\n y : numpy array of shape [n_samples]\n Subset of target values.\n\n Returns\n -------\n self : object\n Fitted estimator.\n " }, @@ -142446,7 +141873,7 @@ "qname": "sklearn.linear_model._perceptron.Perceptron.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -142459,14 +141886,14 @@ "qname": "sklearn.linear_model._perceptron.Perceptron.__init__.penalty", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'l2','l1','elasticnet'}, default=None", "description": "The penalty (aka regularization term) to be used." }, "type": { "kind": "EnumType", - "values": ["elasticnet", "l2", "l1"] + "values": ["l2", "l1", "elasticnet"] } }, { @@ -142475,7 +141902,7 @@ "qname": "sklearn.linear_model._perceptron.Perceptron.__init__.alpha", "default_value": "0.0001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.0001", "description": "Constant that multiplies the regularization term if regularization is\nused." @@ -142491,7 +141918,7 @@ "qname": "sklearn.linear_model._perceptron.Perceptron.__init__.l1_ratio", "default_value": "0.15", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.15", "description": "The Elastic Net mixing parameter, with `0 <= l1_ratio <= 1`.\n`l1_ratio=0` corresponds to L2 penalty, `l1_ratio=1` to L1.\nOnly used if `penalty='elasticnet'`.\n\n.. 
versionadded:: 0.24" @@ -142507,7 +141934,7 @@ "qname": "sklearn.linear_model._perceptron.Perceptron.__init__.fit_intercept", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether the intercept should be estimated or not. If False, the\ndata is assumed to be already centered." @@ -142523,7 +141950,7 @@ "qname": "sklearn.linear_model._perceptron.Perceptron.__init__.max_iter", "default_value": "1000", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=1000", "description": "The maximum number of passes over the training data (aka epochs).\nIt only impacts the behavior in the ``fit`` method, and not the\n:meth:`partial_fit` method.\n\n.. versionadded:: 0.19" @@ -142539,7 +141966,7 @@ "qname": "sklearn.linear_model._perceptron.Perceptron.__init__.tol", "default_value": "0.001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-3", "description": "The stopping criterion. If it is not None, the iterations will stop\nwhen (loss > previous_loss - tol).\n\n.. versionadded:: 0.19" @@ -142555,7 +141982,7 @@ "qname": "sklearn.linear_model._perceptron.Perceptron.__init__.shuffle", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether or not the training data should be shuffled after each epoch." @@ -142571,7 +141998,7 @@ "qname": "sklearn.linear_model._perceptron.Perceptron.__init__.verbose", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=0", "description": "The verbosity level." @@ -142587,7 +142014,7 @@ "qname": "sklearn.linear_model._perceptron.Perceptron.__init__.eta0", "default_value": "1.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1", "description": "Constant by which the updates are multiplied." @@ -142603,7 +142030,7 @@ "qname": "sklearn.linear_model._perceptron.Perceptron.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "The number of CPUs to use to do the OVA (One Versus All, for\nmulti-class problems) computation.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details." @@ -142619,7 +142046,7 @@ "qname": "sklearn.linear_model._perceptron.Perceptron.__init__.random_state", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=0", "description": "Used to shuffle the training data, when ``shuffle`` is set to\n``True``. Pass an int for reproducible output across multiple\nfunction calls.\nSee :term:`Glossary `." @@ -142648,7 +142075,7 @@ "qname": "sklearn.linear_model._perceptron.Perceptron.__init__.early_stopping", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Whether to use early stopping to terminate training when validation.\nscore is not improving. 
If set to True, it will automatically set aside\na stratified fraction of training data as validation and terminate\ntraining when validation score is not improving by at least tol for\nn_iter_no_change consecutive epochs.\n\n.. versionadded:: 0.20" @@ -142664,7 +142091,7 @@ "qname": "sklearn.linear_model._perceptron.Perceptron.__init__.validation_fraction", "default_value": "0.1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.1", "description": "The proportion of training data to set aside as validation set for\nearly stopping. Must be between 0 and 1.\nOnly used if early_stopping is True.\n\n.. versionadded:: 0.20" @@ -142680,7 +142107,7 @@ "qname": "sklearn.linear_model._perceptron.Perceptron.__init__.n_iter_no_change", "default_value": "5", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=5", "description": "Number of iterations with no improvement to wait before early stopping.\n\n.. versionadded:: 0.20" @@ -142696,7 +142123,7 @@ "qname": "sklearn.linear_model._perceptron.Perceptron.__init__.class_weight", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "dict, {class_label: weight} or \"balanced\", default=None", "description": "Preset for the class_weight fit parameter.\n\nWeights associated with classes. If not given, all classes\nare supposed to have weight one.\n\nThe \"balanced\" mode uses the values of y to automatically adjust\nweights inversely proportional to class frequencies in the input data\nas ``n_samples / (n_classes * np.bincount(y))``." @@ -142725,7 +142152,7 @@ "qname": "sklearn.linear_model._perceptron.Perceptron.__init__.warm_start", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "When set to True, reuse the solution of the previous call to fit as\ninitialization, otherwise, just erase the previous solution. See\n:term:`the Glossary `." @@ -142737,7 +142164,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -142753,7 +142180,7 @@ "qname": "sklearn.linear_model._quantile.QuantileRegressor.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -142766,7 +142193,7 @@ "qname": "sklearn.linear_model._quantile.QuantileRegressor.__init__.quantile", "default_value": "0.5", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.5", "description": "The quantile that the model tries to predict. It must be strictly\nbetween 0 and 1. If 0.5 (default), the model predicts the 50%\nquantile, i.e. the median." @@ -142782,7 +142209,7 @@ "qname": "sklearn.linear_model._quantile.QuantileRegressor.__init__.alpha", "default_value": "1.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1.0", "description": "Regularization constant that multiplies the L1 penalty term." @@ -142798,7 +142225,7 @@ "qname": "sklearn.linear_model._quantile.QuantileRegressor.__init__.fit_intercept", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether or not to fit the intercept." 
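The Perceptron hunks above include an elastic-net penalty option; a hedged usage sketch (synthetic data, arbitrary hyperparameters, not part of the diff):

# Illustrative sketch only: l1_ratio takes effect only when
# penalty='elasticnet', as the docstring above notes.
from sklearn.datasets import make_classification
from sklearn.linear_model import Perceptron

X, y = make_classification(n_samples=200, random_state=0)
clf = Perceptron(penalty="elasticnet", alpha=1e-4, l1_ratio=0.15,
                 eta0=1.0, max_iter=1000, tol=1e-3, random_state=0)
clf.fit(X, y)
print(clf.score(X, y))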
@@ -142814,14 +142241,14 @@ "qname": "sklearn.linear_model._quantile.QuantileRegressor.__init__.solver", "default_value": "'interior-point'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'highs-ds', 'highs-ipm', 'highs', 'interior-point', 'revised simplex'}, default='interior-point'", "description": "Method used by :func:`scipy.optimize.linprog` to solve the linear\nprogramming formulation. Note that the highs methods are recommended\nfor usage with `scipy>=1.6.0` because they are the fastest ones.\nSolvers \"highs-ds\", \"highs-ipm\" and \"highs\" support\nsparse input data and, in fact, always convert to sparse csc." }, "type": { "kind": "EnumType", - "values": ["interior-point", "highs-ds", "highs", "revised simplex", "highs-ipm"] + "values": ["highs", "revised simplex", "interior-point", "highs-ds", "highs-ipm"] } }, { @@ -142830,7 +142257,7 @@ "qname": "sklearn.linear_model._quantile.QuantileRegressor.__init__.solver_options", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "dict, default=None", "description": "Additional parameters passed to :func:`scipy.optimize.linprog` as\noptions. If `None` and if `solver='interior-point'`, then\n`{\"lstsq\": True}` is passed to :func:`scipy.optimize.linprog` for the\nsake of stability." @@ -142842,7 +142269,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -142858,7 +142285,7 @@ "qname": "sklearn.linear_model._quantile.QuantileRegressor.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -142871,7 +142298,7 @@ "qname": "sklearn.linear_model._quantile.QuantileRegressor.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Training data." @@ -142896,7 +142323,7 @@ "qname": "sklearn.linear_model._quantile.QuantileRegressor.fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "Target values." @@ -142912,7 +142339,7 @@ "qname": "sklearn.linear_model._quantile.QuantileRegressor.fit.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,), default=None", "description": "Sample weights." 
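A hedged sketch of QuantileRegressor as documented above (synthetic heavy-tailed data; solver="highs" assumes scipy >= 1.6 is installed):

# Illustrative sketch only: median regression (quantile=0.5); the 'highs'
# solvers are recommended in the docstring above for scipy >= 1.6.
import numpy as np
from sklearn.linear_model import QuantileRegressor

rng = np.random.RandomState(0)
X = rng.randn(200, 3)
y = X @ np.array([2.0, 0.0, -1.0]) + rng.standard_t(df=3, size=200)

qr = QuantileRegressor(quantile=0.5, alpha=1.0, solver="highs")
qr.fit(X, y)
print(qr.coef_, qr.intercept_)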
@@ -142924,7 +142351,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the model according to the given training data.", "docstring": "Fit the model according to the given training data.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training data.\n\n y : array-like of shape (n_samples,)\n Target values.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights.\n\n Returns\n -------\n self : object\n Returns self.\n " }, @@ -142940,7 +142367,7 @@ "qname": "sklearn.linear_model._ransac.RANSACRegressor.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -142953,7 +142380,7 @@ "qname": "sklearn.linear_model._ransac.RANSACRegressor.__init__.estimator", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "object, default=None", "description": "Base estimator object which implements the following methods:\n\n * `fit(X, y)`: Fit model to given training data and target values.\n * `score(X, y)`: Returns the mean accuracy on the given test data,\n which is used for the stop criterion defined by `stop_score`.\n Additionally, the score is used to decide which of two equally\n large consensus sets is chosen as the better one.\n * `predict(X)`: Returns predicted values using the linear model,\n which is used to compute residual error using loss function.\n\nIf `estimator` is None, then\n:class:`~sklearn.linear_model.LinearRegression` is used for\ntarget values of dtype float.\n\nNote that the current implementation only supports regression\nestimators." @@ -142969,7 +142396,7 @@ "qname": "sklearn.linear_model._ransac.RANSACRegressor.__init__.min_samples", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int (>= 1) or float ([0, 1]), default=None", "description": "Minimum number of samples chosen randomly from original data. Treated\nas an absolute number of samples for `min_samples >= 1`, treated as a\nrelative number `ceil(min_samples * X.shape[0])` for\n`min_samples < 1`. This is typically chosen as the minimal number of\nsamples necessary to estimate the given `estimator`. By default a\n``sklearn.linear_model.LinearRegression()`` estimator is assumed and\n`min_samples` is chosen as ``X.shape[1] + 1``. This parameter is highly\ndependent upon the model, so if a `estimator` other than\n:class:`linear_model.LinearRegression` is used, the user is\nencouraged to provide a value.\n\n.. deprecated:: 1.0\n Not setting `min_samples` explicitly will raise an error in version\n 1.2 for models other than\n :class:`~sklearn.linear_model.LinearRegression`. To keep the old\n default behavior, set `min_samples=X.shape[1] + 1` explicitly." @@ -142994,7 +142421,7 @@ "qname": "sklearn.linear_model._ransac.RANSACRegressor.__init__.residual_threshold", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=None", "description": "Maximum residual for a data sample to be classified as an inlier.\nBy default the threshold is chosen as the MAD (median absolute\ndeviation) of the target values `y`. Points whose residuals are\nstrictly equal to the threshold are considered as inliers." 
@@ -143010,7 +142437,7 @@ "qname": "sklearn.linear_model._ransac.RANSACRegressor.__init__.is_data_valid", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "callable, default=None", "description": "This function is called with the randomly selected data before the\nmodel is fitted to it: `is_data_valid(X, y)`. If its return value is\nFalse the current randomly chosen sub-sample is skipped." @@ -143026,7 +142453,7 @@ "qname": "sklearn.linear_model._ransac.RANSACRegressor.__init__.is_model_valid", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "callable, default=None", "description": "This function is called with the estimated model and the randomly\nselected data: `is_model_valid(model, X, y)`. If its return value is\nFalse the current randomly chosen sub-sample is skipped.\nRejecting samples with this function is computationally costlier than\nwith `is_data_valid`. `is_model_valid` should therefore only be used if\nthe estimated model is needed for making the rejection decision." @@ -143042,7 +142469,7 @@ "qname": "sklearn.linear_model._ransac.RANSACRegressor.__init__.max_trials", "default_value": "100", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=100", "description": "Maximum number of iterations for random sample selection." @@ -143058,7 +142485,7 @@ "qname": "sklearn.linear_model._ransac.RANSACRegressor.__init__.max_skips", "default_value": "np.inf", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=np.inf", "description": "Maximum number of iterations that can be skipped due to finding zero\ninliers or invalid data defined by ``is_data_valid`` or invalid models\ndefined by ``is_model_valid``.\n\n.. versionadded:: 0.19" @@ -143074,7 +142501,7 @@ "qname": "sklearn.linear_model._ransac.RANSACRegressor.__init__.stop_n_inliers", "default_value": "np.inf", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=np.inf", "description": "Stop iteration if at least this number of inliers are found." @@ -143090,7 +142517,7 @@ "qname": "sklearn.linear_model._ransac.RANSACRegressor.__init__.stop_score", "default_value": "np.inf", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=np.inf", "description": "Stop iteration if score is greater equal than this threshold." @@ -143106,7 +142533,7 @@ "qname": "sklearn.linear_model._ransac.RANSACRegressor.__init__.stop_probability", "default_value": "0.99", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float in range [0, 1], default=0.99", "description": "RANSAC iteration stops if at least one outlier-free set of the training\ndata is sampled in RANSAC. This requires to generate at least N\nsamples (iterations)::\n\n N >= log(1 - probability) / log(1 - e**m)\n\nwhere the probability (confidence) is typically set to high value such\nas 0.99 (the default) and e is the current fraction of inliers w.r.t.\nthe total number of samples." 
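The stop_probability docstring above quotes the bound N >= log(1 - probability) / log(1 - e**m); a quick numeric check, where the inlier fraction e and per-trial sample count m below are assumed values, not taken from the diff:

# Illustrative arithmetic only; e and m are assumptions for the example.
import math

probability = 0.99   # default stop_probability (confidence)
e = 0.5              # assumed fraction of inliers
m = 3                # assumed samples drawn per RANSAC trial (min_samples)

n_trials = math.log(1 - probability) / math.log(1 - e**m)
print(math.ceil(n_trials))  # ~35 trials needed for this inlier ratio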
@@ -143131,7 +142558,7 @@ "qname": "sklearn.linear_model._ransac.RANSACRegressor.__init__.loss", "default_value": "'absolute_error'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str, callable, default='absolute_error'", "description": "String inputs, 'absolute_error' and 'squared_error' are supported which\nfind the absolute error and squared error per sample respectively.\n\nIf ``loss`` is a callable, then it should be a function that takes\ntwo arrays as inputs, the true and predicted value and returns a 1-D\narray with the i-th value of the array corresponding to the loss\non ``X[i]``.\n\nIf the loss on a sample is greater than the ``residual_threshold``,\nthen this sample is classified as an outlier.\n\n.. versionadded:: 0.18\n\n.. deprecated:: 1.0\n The loss 'squared_loss' was deprecated in v1.0 and will be removed\n in version 1.2. Use `loss='squared_error'` which is equivalent.\n\n.. deprecated:: 1.0\n The loss 'absolute_loss' was deprecated in v1.0 and will be removed\n in version 1.2. Use `loss='absolute_error'` which is equivalent." @@ -143156,7 +142583,7 @@ "qname": "sklearn.linear_model._ransac.RANSACRegressor.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance, default=None", "description": "The generator used to initialize the centers.\nPass an int for reproducible output across multiple function calls.\nSee :term:`Glossary `." @@ -143181,7 +142608,7 @@ "qname": "sklearn.linear_model._ransac.RANSACRegressor.__init__.base_estimator", "default_value": "'deprecated'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "object, default=\"deprecated\"", "description": "Use `estimator` instead.\n\n.. deprecated:: 1.1\n `base_estimator` is deprecated and will be removed in 1.3.\n Use `estimator` instead." @@ -143193,7 +142620,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -143234,7 +142661,7 @@ "qname": "sklearn.linear_model._ransac.RANSACRegressor.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -143247,7 +142674,7 @@ "qname": "sklearn.linear_model._ransac.RANSACRegressor.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Training data." @@ -143272,7 +142699,7 @@ "qname": "sklearn.linear_model._ransac.RANSACRegressor.fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,) or (n_samples, n_targets)", "description": "Target values." @@ -143288,7 +142715,7 @@ "qname": "sklearn.linear_model._ransac.RANSACRegressor.fit.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,), default=None", "description": "Individual weights for each sample\nraises error if sample_weight is passed and estimator\nfit method does not support it.\n\n.. 
versionadded:: 0.18" @@ -143300,7 +142727,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit estimator using RANSAC algorithm.", "docstring": "Fit estimator using RANSAC algorithm.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training data.\n\n y : array-like of shape (n_samples,) or (n_samples, n_targets)\n Target values.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Individual weights for each sample\n raises error if sample_weight is passed and estimator\n fit method does not support it.\n\n .. versionadded:: 0.18\n\n Returns\n -------\n self : object\n Fitted `RANSACRegressor` estimator.\n\n Raises\n ------\n ValueError\n If no valid consensus set could be found. This occurs if\n `is_data_valid` and `is_model_valid` return False for all\n `max_trials` randomly chosen sub-samples.\n " }, @@ -143316,7 +142743,7 @@ "qname": "sklearn.linear_model._ransac.RANSACRegressor.predict.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -143329,7 +142756,7 @@ "qname": "sklearn.linear_model._ransac.RANSACRegressor.predict.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like or sparse matrix} of shape (n_samples, n_features)", "description": "Input data." @@ -143350,7 +142777,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict using the estimated model.\n\nThis is a wrapper for `estimator_.predict(X)`.", "docstring": "Predict using the estimated model.\n\n This is a wrapper for `estimator_.predict(X)`.\n\n Parameters\n ----------\n X : {array-like or sparse matrix} of shape (n_samples, n_features)\n Input data.\n\n Returns\n -------\n y : array, shape = [n_samples] or [n_samples, n_targets]\n Returns predicted values.\n " }, @@ -143366,7 +142793,7 @@ "qname": "sklearn.linear_model._ransac.RANSACRegressor.score.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -143379,7 +142806,7 @@ "qname": "sklearn.linear_model._ransac.RANSACRegressor.score.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "(array-like or sparse matrix} of shape (n_samples, n_features)", "description": "Training data." @@ -143395,7 +142822,7 @@ "qname": "sklearn.linear_model._ransac.RANSACRegressor.score.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,) or (n_samples, n_targets)", "description": "Target values." 
@@ -143407,7 +142834,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Return the score of the prediction.\n\nThis is a wrapper for `estimator_.score(X, y)`.", "docstring": "Return the score of the prediction.\n\n This is a wrapper for `estimator_.score(X, y)`.\n\n Parameters\n ----------\n X : (array-like or sparse matrix} of shape (n_samples, n_features)\n Training data.\n\n y : array-like of shape (n_samples,) or (n_samples, n_targets)\n Target values.\n\n Returns\n -------\n z : float\n Score of the prediction.\n " }, @@ -143499,7 +142926,7 @@ "qname": "sklearn.linear_model._ridge.Ridge.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -143512,7 +142939,7 @@ "qname": "sklearn.linear_model._ridge.Ridge.__init__.alpha", "default_value": "1.0", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{float, ndarray of shape (n_targets,)}, default=1.0", "description": "Constant that multiplies the L2 term, controlling regularization\nstrength. `alpha` must be a non-negative float i.e. in `[0, inf)`.\n\nWhen `alpha = 0`, the objective is equivalent to ordinary least\nsquares, solved by the :class:`LinearRegression` object. For numerical\nreasons, using `alpha = 0` with the `Ridge` object is not advised.\nInstead, you should use the :class:`LinearRegression` object.\n\nIf an array is passed, penalties are assumed to be specific to the\ntargets. Hence they must correspond in number." @@ -143528,7 +142955,7 @@ "qname": "sklearn.linear_model._ridge.Ridge.__init__.fit_intercept", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether to fit the intercept for this model. If set\nto false, no intercept will be used in calculations\n(i.e. ``X`` and ``y`` are expected to be centered)." @@ -143544,7 +142971,7 @@ "qname": "sklearn.linear_model._ridge.Ridge.__init__.normalize", "default_value": "'deprecated'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "This parameter is ignored when ``fit_intercept`` is set to False.\nIf True, the regressors X will be normalized before regression by\nsubtracting the mean and dividing by the l2-norm.\nIf you wish to standardize, please use\n:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``\non an estimator with ``normalize=False``.\n\n.. deprecated:: 1.0\n ``normalize`` was deprecated in version 1.0 and\n will be removed in 1.2." @@ -143560,7 +142987,7 @@ "qname": "sklearn.linear_model._ridge.Ridge.__init__.copy_X", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "If True, X will be copied; else, it may be overwritten." @@ -143576,7 +143003,7 @@ "qname": "sklearn.linear_model._ridge.Ridge.__init__.max_iter", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Maximum number of iterations for conjugate gradient solver.\nFor 'sparse_cg' and 'lsqr' solvers, the default value is determined\nby scipy.sparse.linalg. For 'sag' solver, the default value is 1000.\nFor 'lbfgs' solver, the default value is 15000." 
@@ -143592,7 +143019,7 @@ "qname": "sklearn.linear_model._ridge.Ridge.__init__.tol", "default_value": "0.001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-3", "description": "Precision of the solution." @@ -143608,14 +143035,14 @@ "qname": "sklearn.linear_model._ridge.Ridge.__init__.solver", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag', 'saga', 'lbfgs'}, default='auto'", "description": "Solver to use in the computational routines:\n\n- 'auto' chooses the solver automatically based on the type of data.\n\n- 'svd' uses a Singular Value Decomposition of X to compute the Ridge\n coefficients. It is the most stable solver, in particular more stable\n for singular matrices than 'cholesky' at the cost of being slower.\n\n- 'cholesky' uses the standard scipy.linalg.solve function to\n obtain a closed-form solution.\n\n- 'sparse_cg' uses the conjugate gradient solver as found in\n scipy.sparse.linalg.cg. As an iterative algorithm, this solver is\n more appropriate than 'cholesky' for large-scale data\n (possibility to set `tol` and `max_iter`).\n\n- 'lsqr' uses the dedicated regularized least-squares routine\n scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative\n procedure.\n\n- 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses\n its improved, unbiased version named SAGA. Both methods also use an\n iterative procedure, and are often faster than other solvers when\n both n_samples and n_features are large. Note that 'sag' and\n 'saga' fast convergence is only guaranteed on features with\n approximately the same scale. You can preprocess the data with a\n scaler from sklearn.preprocessing.\n\n- 'lbfgs' uses L-BFGS-B algorithm implemented in\n `scipy.optimize.minimize`. It can be used only when `positive`\n is True.\n\nAll solvers except 'svd' support both dense and sparse data. However, only\n'lsqr', 'sag', 'sparse_cg', and 'lbfgs' support sparse input when\n`fit_intercept` is True.\n\n.. versionadded:: 0.17\n Stochastic Average Gradient descent solver.\n.. versionadded:: 0.19\n SAGA solver." }, "type": { "kind": "EnumType", - "values": ["lsqr", "cholesky", "sparse_cg", "lbfgs", "svd", "auto", "saga", "sag"] + "values": ["auto", "svd", "saga", "sparse_cg", "sag", "lsqr", "cholesky", "lbfgs"] } }, { @@ -143624,7 +143051,7 @@ "qname": "sklearn.linear_model._ridge.Ridge.__init__.positive", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "When set to ``True``, forces the coefficients to be positive.\nOnly 'lbfgs' solver is supported in this case." @@ -143640,7 +143067,7 @@ "qname": "sklearn.linear_model._ridge.Ridge.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance, default=None", "description": "Used when ``solver`` == 'sag' or 'saga' to shuffle the data.\nSee :term:`Glossary ` for details.\n\n.. versionadded:: 0.17\n `random_state` to support Stochastic Average Gradient." 
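A hedged sketch of the Ridge solver choices documented above (synthetic data); per the docstring, positive=True is only supported with solver='lbfgs':

# Illustrative sketch only: non-negative ridge coefficients via the lbfgs
# solver described in the solver docstring above.
import numpy as np
from sklearn.linear_model import Ridge

rng = np.random.RandomState(0)
X = rng.randn(100, 5)
y = X @ np.array([1.0, 2.0, 0.0, 0.5, 3.0]) + 0.1 * rng.randn(100)

ridge = Ridge(alpha=1.0, solver="lbfgs", positive=True)
ridge.fit(X, y)
print(ridge.coef_)   # coefficients constrained to be non-negative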
@@ -143661,7 +143088,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -143677,7 +143104,7 @@ "qname": "sklearn.linear_model._ridge.Ridge.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -143690,7 +143117,7 @@ "qname": "sklearn.linear_model._ridge.Ridge.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{ndarray, sparse matrix} of shape (n_samples, n_features)", "description": "Training data." @@ -143715,7 +143142,7 @@ "qname": "sklearn.linear_model._ridge.Ridge.fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray of shape (n_samples,) or (n_samples, n_targets)", "description": "Target values." @@ -143731,7 +143158,7 @@ "qname": "sklearn.linear_model._ridge.Ridge.fit.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "float or ndarray of shape (n_samples,), default=None", "description": "Individual weights for each sample. If given a float, every sample\nwill have the same weight." @@ -143752,7 +143179,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit Ridge regression model.", "docstring": "Fit Ridge regression model.\n\n Parameters\n ----------\n X : {ndarray, sparse matrix} of shape (n_samples, n_features)\n Training data.\n\n y : ndarray of shape (n_samples,) or (n_samples, n_targets)\n Target values.\n\n sample_weight : float or ndarray of shape (n_samples,), default=None\n Individual weights for each sample. If given a float, every sample\n will have the same weight.\n\n Returns\n -------\n self : object\n Fitted estimator.\n " }, @@ -143768,7 +143195,7 @@ "qname": "sklearn.linear_model._ridge.RidgeClassifier.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -143781,7 +143208,7 @@ "qname": "sklearn.linear_model._ridge.RidgeClassifier.__init__.alpha", "default_value": "1.0", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1.0", "description": "Regularization strength; must be a positive float. Regularization\nimproves the conditioning of the problem and reduces the variance of\nthe estimates. Larger values specify stronger regularization.\nAlpha corresponds to ``1 / (2C)`` in other linear models such as\n:class:`~sklearn.linear_model.LogisticRegression` or\n:class:`~sklearn.svm.LinearSVC`." @@ -143797,7 +143224,7 @@ "qname": "sklearn.linear_model._ridge.RidgeClassifier.__init__.fit_intercept", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether to calculate the intercept for this model. If set to false, no\nintercept will be used in calculations (e.g. data is expected to be\nalready centered)." 
@@ -143813,7 +143240,7 @@ "qname": "sklearn.linear_model._ridge.RidgeClassifier.__init__.normalize", "default_value": "'deprecated'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "This parameter is ignored when ``fit_intercept`` is set to False.\nIf True, the regressors X will be normalized before regression by\nsubtracting the mean and dividing by the l2-norm.\nIf you wish to standardize, please use\n:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``\non an estimator with ``normalize=False``.\n\n.. deprecated:: 1.0\n ``normalize`` was deprecated in version 1.0 and\n will be removed in 1.2." @@ -143829,7 +143256,7 @@ "qname": "sklearn.linear_model._ridge.RidgeClassifier.__init__.copy_X", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "If True, X will be copied; else, it may be overwritten." @@ -143845,7 +143272,7 @@ "qname": "sklearn.linear_model._ridge.RidgeClassifier.__init__.max_iter", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Maximum number of iterations for conjugate gradient solver.\nThe default value is determined by scipy.sparse.linalg." @@ -143861,7 +143288,7 @@ "qname": "sklearn.linear_model._ridge.RidgeClassifier.__init__.tol", "default_value": "0.001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-3", "description": "Precision of the solution." @@ -143877,7 +143304,7 @@ "qname": "sklearn.linear_model._ridge.RidgeClassifier.__init__.class_weight", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "dict or 'balanced', default=None", "description": "Weights associated with classes in the form ``{class_label: weight}``.\nIf not given, all classes are supposed to have weight one.\n\nThe \"balanced\" mode uses the values of y to automatically adjust\nweights inversely proportional to class frequencies in the input data\nas ``n_samples / (n_classes * np.bincount(y))``." @@ -143902,14 +143329,14 @@ "qname": "sklearn.linear_model._ridge.RidgeClassifier.__init__.solver", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag', 'saga', 'lbfgs'}, default='auto'", "description": "Solver to use in the computational routines:\n\n- 'auto' chooses the solver automatically based on the type of data.\n\n- 'svd' uses a Singular Value Decomposition of X to compute the Ridge\n coefficients. It is the most stable solver, in particular more stable\n for singular matrices than 'cholesky' at the cost of being slower.\n\n- 'cholesky' uses the standard scipy.linalg.solve function to\n obtain a closed-form solution.\n\n- 'sparse_cg' uses the conjugate gradient solver as found in\n scipy.sparse.linalg.cg. As an iterative algorithm, this solver is\n more appropriate than 'cholesky' for large-scale data\n (possibility to set `tol` and `max_iter`).\n\n- 'lsqr' uses the dedicated regularized least-squares routine\n scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative\n procedure.\n\n- 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses\n its unbiased and more flexible version named SAGA. 
Both methods\n use an iterative procedure, and are often faster than other solvers\n when both n_samples and n_features are large. Note that 'sag' and\n 'saga' fast convergence is only guaranteed on features with\n approximately the same scale. You can preprocess the data with a\n scaler from sklearn.preprocessing.\n\n .. versionadded:: 0.17\n Stochastic Average Gradient descent solver.\n .. versionadded:: 0.19\n SAGA solver.\n\n- 'lbfgs' uses L-BFGS-B algorithm implemented in\n `scipy.optimize.minimize`. It can be used only when `positive`\n is True." }, "type": { "kind": "EnumType", - "values": ["lsqr", "cholesky", "sparse_cg", "lbfgs", "svd", "auto", "saga", "sag"] + "values": ["auto", "svd", "saga", "sparse_cg", "sag", "lsqr", "cholesky", "lbfgs"] } }, { @@ -143918,7 +143345,7 @@ "qname": "sklearn.linear_model._ridge.RidgeClassifier.__init__.positive", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "When set to ``True``, forces the coefficients to be positive.\nOnly 'lbfgs' solver is supported in this case." @@ -143934,7 +143361,7 @@ "qname": "sklearn.linear_model._ridge.RidgeClassifier.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance, default=None", "description": "Used when ``solver`` == 'sag' or 'saga' to shuffle the data.\nSee :term:`Glossary ` for details." @@ -143955,7 +143382,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -143971,7 +143398,7 @@ "qname": "sklearn.linear_model._ridge.RidgeClassifier.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -143984,7 +143411,7 @@ "qname": "sklearn.linear_model._ridge.RidgeClassifier.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{ndarray, sparse matrix} of shape (n_samples, n_features)", "description": "Training data." @@ -144009,7 +143436,7 @@ "qname": "sklearn.linear_model._ridge.RidgeClassifier.fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray of shape (n_samples,)", "description": "Target values." @@ -144025,7 +143452,7 @@ "qname": "sklearn.linear_model._ridge.RidgeClassifier.fit.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "float or ndarray of shape (n_samples,), default=None", "description": "Individual weights for each sample. If given a float, every sample\nwill have the same weight.\n\n.. versionadded:: 0.17\n *sample_weight* support to RidgeClassifier." @@ -144046,7 +143473,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit Ridge classifier model.", "docstring": "Fit Ridge classifier model.\n\n Parameters\n ----------\n X : {ndarray, sparse matrix} of shape (n_samples, n_features)\n Training data.\n\n y : ndarray of shape (n_samples,)\n Target values.\n\n sample_weight : float or ndarray of shape (n_samples,), default=None\n Individual weights for each sample. If given a float, every sample\n will have the same weight.\n\n .. 
versionadded:: 0.17\n *sample_weight* support to RidgeClassifier.\n\n Returns\n -------\n self : object\n Instance of the estimator.\n " }, @@ -144062,7 +143489,7 @@ "qname": "sklearn.linear_model._ridge.RidgeClassifierCV.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -144075,7 +143502,7 @@ "qname": "sklearn.linear_model._ridge.RidgeClassifierCV.__init__.alphas", "default_value": "(0.1, 1.0, 10.0)", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray of shape (n_alphas,), default=(0.1, 1.0, 10.0)", "description": "Array of alpha values to try.\nRegularization strength; must be a positive float. Regularization\nimproves the conditioning of the problem and reduces the variance of\nthe estimates. Larger values specify stronger regularization.\nAlpha corresponds to ``1 / (2C)`` in other linear models such as\n:class:`~sklearn.linear_model.LogisticRegression` or\n:class:`~sklearn.svm.LinearSVC`." @@ -144091,7 +143518,7 @@ "qname": "sklearn.linear_model._ridge.RidgeClassifierCV.__init__.fit_intercept", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether to calculate the intercept for this model. If set\nto false, no intercept will be used in calculations\n(i.e. data is expected to be centered)." @@ -144107,7 +143534,7 @@ "qname": "sklearn.linear_model._ridge.RidgeClassifierCV.__init__.normalize", "default_value": "'deprecated'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "This parameter is ignored when ``fit_intercept`` is set to False.\nIf True, the regressors X will be normalized before regression by\nsubtracting the mean and dividing by the l2-norm.\nIf you wish to standardize, please use\n:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``\non an estimator with ``normalize=False``.\n\n.. deprecated:: 1.0\n ``normalize`` was deprecated in version 1.0 and\n will be removed in 1.2." @@ -144123,7 +143550,7 @@ "qname": "sklearn.linear_model._ridge.RidgeClassifierCV.__init__.scoring", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str, callable, default=None", "description": "A string (see model evaluation documentation) or\na scorer callable object / function with signature\n``scorer(estimator, X, y)``." @@ -144148,7 +143575,7 @@ "qname": "sklearn.linear_model._ridge.RidgeClassifierCV.__init__.cv", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, cross-validation generator or an iterable, default=None", "description": "Determines the cross-validation splitting strategy.\nPossible inputs for cv are:\n\n- None, to use the efficient Leave-One-Out cross-validation\n- integer, to specify the number of folds.\n- :term:`CV splitter`,\n- An iterable yielding (train, test) splits as arrays of indices.\n\nRefer :ref:`User Guide ` for the various\ncross-validation strategies that can be used here." 
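A hedged sketch of RidgeClassifierCV with the alphas grid and cv strategy documented above (synthetic data; cv=5 replaces the default efficient Leave-One-Out):

# Illustrative sketch only: selects alpha by 5-fold cross-validation over the
# default (0.1, 1.0, 10.0) grid.
from sklearn.datasets import make_classification
from sklearn.linear_model import RidgeClassifierCV

X, y = make_classification(n_samples=300, n_features=20, random_state=0)
clf = RidgeClassifierCV(alphas=(0.1, 1.0, 10.0), cv=5)
clf.fit(X, y)
print(clf.alpha_)      # alpha chosen by cross-validation
print(clf.score(X, y))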
@@ -144177,7 +143604,7 @@ "qname": "sklearn.linear_model._ridge.RidgeClassifierCV.__init__.class_weight", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "dict or 'balanced', default=None", "description": "Weights associated with classes in the form ``{class_label: weight}``.\nIf not given, all classes are supposed to have weight one.\n\nThe \"balanced\" mode uses the values of y to automatically adjust\nweights inversely proportional to class frequencies in the input data\nas ``n_samples / (n_classes * np.bincount(y))``." @@ -144202,7 +143629,7 @@ "qname": "sklearn.linear_model._ridge.RidgeClassifierCV.__init__.store_cv_values", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Flag indicating if the cross-validation values corresponding to\neach alpha should be stored in the ``cv_values_`` attribute (see\nbelow). This flag is only compatible with ``cv=None`` (i.e. using\nLeave-One-Out Cross-Validation)." @@ -144214,7 +143641,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -144255,7 +143682,7 @@ "qname": "sklearn.linear_model._ridge.RidgeClassifierCV.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -144268,7 +143695,7 @@ "qname": "sklearn.linear_model._ridge.RidgeClassifierCV.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray of shape (n_samples, n_features)", "description": "Training vectors, where `n_samples` is the number of samples\nand `n_features` is the number of features. When using GCV,\nwill be cast to float64 if necessary." @@ -144284,7 +143711,7 @@ "qname": "sklearn.linear_model._ridge.RidgeClassifierCV.fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray of shape (n_samples,)", "description": "Target values. Will be cast to X's dtype if necessary." @@ -144300,7 +143727,7 @@ "qname": "sklearn.linear_model._ridge.RidgeClassifierCV.fit.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "float or ndarray of shape (n_samples,), default=None", "description": "Individual weights for each sample. If given a float, every sample\nwill have the same weight." @@ -144321,7 +143748,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit Ridge classifier with cv.", "docstring": "Fit Ridge classifier with cv.\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Training vectors, where `n_samples` is the number of samples\n and `n_features` is the number of features. When using GCV,\n will be cast to float64 if necessary.\n\n y : ndarray of shape (n_samples,)\n Target values. Will be cast to X's dtype if necessary.\n\n sample_weight : float or ndarray of shape (n_samples,), default=None\n Individual weights for each sample. 
If given a float, every sample\n will have the same weight.\n\n Returns\n -------\n self : object\n Fitted estimator.\n " }, @@ -147766,7 +147193,7 @@ }, "type": { "kind": "EnumType", - "values": ["lsqr", "cholesky", "sparse_cg", "lbfgs", "svd", "auto", "saga", "sag"] + "values": ["auto", "svd", "saga", "sparse_cg", "sag", "lsqr", "cholesky", "lbfgs"] } }, { @@ -147963,7 +147390,7 @@ }, "type": { "kind": "EnumType", - "values": ["log", "squared", "multinomial"] + "values": ["squared", "multinomial", "log"] } }, { @@ -148096,7 +147523,7 @@ }, "type": { "kind": "EnumType", - "values": ["log", "squared", "multinomial"] + "values": ["squared", "multinomial", "log"] } }, { @@ -151081,7 +150508,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDClassifier.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -151094,7 +150521,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDClassifier.__init__.loss", "default_value": "'hinge'", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{'hinge', 'log_loss', 'log', 'modified_huber', 'squared_hinge', 'perceptron', 'squared_error', 'huber', 'epsilon_insensitive', 'squared_epsilon_insensitive'}, default='hinge'", "description": "The loss function to be used.\n\n- 'hinge' gives a linear SVM.\n- 'log_loss' gives logistic regression, a probabilistic classifier.\n- 'modified_huber' is another smooth loss that brings tolerance to\n outliers as well as probability estimates.\n- 'squared_hinge' is like hinge but is quadratically penalized.\n- 'perceptron' is the linear loss used by the perceptron algorithm.\n- The other losses, 'squared_error', 'huber', 'epsilon_insensitive' and\n 'squared_epsilon_insensitive' are designed for regression but can be useful\n in classification as well; see\n :class:`~sklearn.linear_model.SGDRegressor` for a description.\n\nMore details about the losses formulas can be found in the\n:ref:`User Guide `.\n\n.. deprecated:: 1.0\n The loss 'squared_loss' was deprecated in v1.0 and will be removed\n in version 1.2. Use `loss='squared_error'` which is equivalent.\n\n.. deprecated:: 1.1\n The loss 'log' was deprecated in v1.1 and will be removed\n in version 1.3. Use `loss='log_loss'` which is equivalent." @@ -151102,16 +150529,16 @@ "type": { "kind": "EnumType", "values": [ - "squared_epsilon_insensitive", "epsilon_insensitive", - "log_loss", - "squared_hinge", - "log", - "huber", "squared_error", - "hinge", "perceptron", - "modified_huber" + "huber", + "hinge", + "modified_huber", + "squared_hinge", + "squared_epsilon_insensitive", + "log_loss", + "log" ] } }, @@ -151121,14 +150548,14 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDClassifier.__init__.penalty", "default_value": "'l2'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'l2', 'l1', 'elasticnet'}, default='l2'", "description": "The penalty (aka regularization term) to be used. Defaults to 'l2'\nwhich is the standard regularizer for linear SVM models. 'l1' and\n'elasticnet' might bring sparsity to the model (feature selection)\nnot achievable with 'l2'." 
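A comparable sketch for the RidgeClassifierCV entries completed just above; the alpha grid mirrors the documented default, and cv=None selects the efficient Leave-One-Out strategy described in the cv docstring (the dataset is illustrative).

    # Illustrative only -- not part of the annotation data.
    from sklearn.datasets import load_breast_cancer
    from sklearn.linear_model import RidgeClassifierCV

    X, y = load_breast_cancer(return_X_y=True)
    clf = RidgeClassifierCV(alphas=(0.1, 1.0, 10.0), cv=None)  # None -> efficient Leave-One-Out CV
    clf.fit(X, y)
    print(clf.alpha_, clf.best_score_)  # regularization strength chosen by cross-validation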
}, "type": { "kind": "EnumType", - "values": ["elasticnet", "l2", "l1"] + "values": ["l2", "l1", "elasticnet"] } }, { @@ -151137,7 +150564,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDClassifier.__init__.alpha", "default_value": "0.0001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.0001", "description": "Constant that multiplies the regularization term. The higher the\nvalue, the stronger the regularization.\nAlso used to compute the learning rate when set to `learning_rate` is\nset to 'optimal'.\nValues must be in the range `[0.0, inf)`." @@ -151153,7 +150580,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDClassifier.__init__.l1_ratio", "default_value": "0.15", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.15", "description": "The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.\nl1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.\nOnly used if `penalty` is 'elasticnet'.\nValues must be in the range `[0.0, 1.0]`." @@ -151182,7 +150609,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDClassifier.__init__.fit_intercept", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether the intercept should be estimated or not. If False, the\ndata is assumed to be already centered." @@ -151198,7 +150625,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDClassifier.__init__.max_iter", "default_value": "1000", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=1000", "description": "The maximum number of passes over the training data (aka epochs).\nIt only impacts the behavior in the ``fit`` method, and not the\n:meth:`partial_fit` method.\nValues must be in the range `[1, inf)`.\n\n.. versionadded:: 0.19" @@ -151214,7 +150641,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDClassifier.__init__.tol", "default_value": "0.001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-3", "description": "The stopping criterion. If it is not None, training will stop\nwhen (loss > best_loss - tol) for ``n_iter_no_change`` consecutive\nepochs.\nConvergence is checked against the training loss or the\nvalidation loss depending on the `early_stopping` parameter.\nValues must be in the range `[0.0, inf)`.\n\n.. versionadded:: 0.19" @@ -151230,7 +150657,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDClassifier.__init__.shuffle", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether or not the training data should be shuffled after each epoch." @@ -151246,7 +150673,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDClassifier.__init__.verbose", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=0", "description": "The verbosity level.\nValues must be in the range `[0, inf)`." 
@@ -151262,7 +150689,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDClassifier.__init__.epsilon", "default_value": "DEFAULT_EPSILON", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.1", "description": "Epsilon in the epsilon-insensitive loss functions; only if `loss` is\n'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.\nFor 'huber', determines the threshold at which it becomes less\nimportant to get the prediction exactly right.\nFor epsilon-insensitive, any differences between the current prediction\nand the correct label are ignored if they are less than this threshold.\nValues must be in the range `[0.0, inf)`." @@ -151278,7 +150705,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDClassifier.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "The number of CPUs to use to do the OVA (One Versus All, for\nmulti-class problems) computation.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details." @@ -151294,7 +150721,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDClassifier.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance, default=None", "description": "Used for shuffling the data, when ``shuffle`` is set to ``True``.\nPass an int for reproducible output across multiple function calls.\nSee :term:`Glossary `.\nInteger values must be in the range `[0, 2**32 - 1]`." @@ -151319,7 +150746,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDClassifier.__init__.learning_rate", "default_value": "'optimal'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str, default='optimal'", "description": "The learning rate schedule:\n\n- 'constant': `eta = eta0`\n- 'optimal': `eta = 1.0 / (alpha * (t + t0))`\n where `t0` is chosen by a heuristic proposed by Leon Bottou.\n- 'invscaling': `eta = eta0 / pow(t, power_t)`\n- 'adaptive': `eta = eta0`, as long as the training keeps decreasing.\n Each time n_iter_no_change consecutive epochs fail to decrease the\n training loss by tol or fail to increase validation score by tol if\n `early_stopping` is `True`, the current learning rate is divided by 5.\n\n .. versionadded:: 0.20\n Added 'adaptive' option" @@ -151335,7 +150762,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDClassifier.__init__.eta0", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.0", "description": "The initial learning rate for the 'constant', 'invscaling' or\n'adaptive' schedules. The default value is 0.0 as eta0 is not used by\nthe default schedule 'optimal'.\nValues must be in the range `(0.0, inf)`." @@ -151351,7 +150778,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDClassifier.__init__.power_t", "default_value": "0.5", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.5", "description": "The exponent for inverse scaling learning rate [default 0.5].\nValues must be in the range `(-inf, inf)`." 
@@ -151367,7 +150794,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDClassifier.__init__.early_stopping", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Whether to use early stopping to terminate training when validation\nscore is not improving. If set to `True`, it will automatically set aside\na stratified fraction of training data as validation and terminate\ntraining when validation score returned by the `score` method is not\nimproving by at least tol for n_iter_no_change consecutive epochs.\n\n.. versionadded:: 0.20\n Added 'early_stopping' option" @@ -151383,7 +150810,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDClassifier.__init__.validation_fraction", "default_value": "0.1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.1", "description": "The proportion of training data to set aside as validation set for\nearly stopping. Must be between 0 and 1.\nOnly used if `early_stopping` is True.\nValues must be in the range `(0.0, 1.0)`.\n\n.. versionadded:: 0.20\n Added 'validation_fraction' option" @@ -151412,7 +150839,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDClassifier.__init__.n_iter_no_change", "default_value": "5", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=5", "description": "Number of iterations with no improvement to wait before stopping\nfitting.\nConvergence is checked against the training loss or the\nvalidation loss depending on the `early_stopping` parameter.\nInteger values must be in the range `[1, max_iter)`.\n\n.. versionadded:: 0.20\n Added 'n_iter_no_change' option" @@ -151428,7 +150855,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDClassifier.__init__.class_weight", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "dict, {class_label: weight} or \"balanced\", default=None", "description": "Preset for the class_weight fit parameter.\n\nWeights associated with classes. If not given, all classes\nare supposed to have weight one.\n\nThe \"balanced\" mode uses the values of y to automatically adjust\nweights inversely proportional to class frequencies in the input data\nas ``n_samples / (n_classes * np.bincount(y))``." @@ -151457,7 +150884,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDClassifier.__init__.warm_start", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "When set to True, reuse the solution of the previous call to fit as\ninitialization, otherwise, just erase the previous solution.\nSee :term:`the Glossary `.\n\nRepeatedly calling fit or partial_fit when warm_start is True can\nresult in a different solution than when calling fit a single time\nbecause of the way the data is shuffled.\nIf a dynamic learning rate is used, the learning rate is adapted\ndepending on the number of samples already seen. Calling ``fit`` resets\nthis counter, while ``partial_fit`` will result in increasing the\nexisting counter." 
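The SGDClassifier constructor parameters annotated above (loss, penalty, l1_ratio, learning-rate schedule, early stopping) combine roughly as in the sketch below; every concrete value is arbitrary, and the scaler reflects the general advice that SGD converges best on standardized features.

    # Illustrative only -- not part of the annotation data.
    from sklearn.datasets import make_classification
    from sklearn.linear_model import SGDClassifier
    from sklearn.pipeline import make_pipeline
    from sklearn.preprocessing import StandardScaler

    X, y = make_classification(n_samples=1000, random_state=0)
    clf = make_pipeline(
        StandardScaler(),                          # SGD is sensitive to feature scale
        SGDClassifier(loss="log_loss",             # replaces the deprecated 'log'
                      penalty="elasticnet", alpha=1e-4, l1_ratio=0.15,
                      learning_rate="adaptive", eta0=0.01,
                      early_stopping=True, validation_fraction=0.1,
                      n_iter_no_change=5, random_state=0),
    )
    clf.fit(X, y)
    print(clf.score(X, y))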
@@ -151473,7 +150900,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDClassifier.__init__.average", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool or int, default=False", "description": "When set to `True`, computes the averaged SGD weights across all\nupdates and stores the result in the ``coef_`` attribute. If set to\nan int greater than 1, averaging will begin once the total number of\nsamples seen reaches `average`. So ``average=10`` will begin\naveraging after seeing 10 samples.\nInteger values must be in the range `[1, n_samples]`." @@ -151494,7 +150921,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -151560,7 +150987,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDClassifier.predict_log_proba.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -151573,7 +151000,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDClassifier.predict_log_proba.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Input data for prediction." @@ -151594,7 +151021,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Log of probability estimates.\n\nThis method is only available for log loss and modified Huber loss.\n\nWhen loss=\"modified_huber\", probability estimates may be hard zeros\nand ones, so taking the logarithm is not possible.\n\nSee ``predict_proba`` for details.", "docstring": "Log of probability estimates.\n\n This method is only available for log loss and modified Huber loss.\n\n When loss=\"modified_huber\", probability estimates may be hard zeros\n and ones, so taking the logarithm is not possible.\n\n See ``predict_proba`` for details.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Input data for prediction.\n\n Returns\n -------\n T : array-like, shape (n_samples, n_classes)\n Returns the log-probability of the sample for each class in the\n model, where classes are ordered as they are in\n `self.classes_`.\n " }, @@ -151610,7 +151037,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDClassifier.predict_proba.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -151623,7 +151050,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDClassifier.predict_proba.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix}, shape (n_samples, n_features)", "description": "Input data for prediction." @@ -151644,7 +151071,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Probability estimates.\n\nThis method is only available for log loss and modified Huber loss.\n\nMulticlass probability estimates are derived from binary (one-vs.-rest)\nestimates by simple normalization, as recommended by Zadrozny and\nElkan.\n\nBinary probability estimates for loss=\"modified_huber\" are given by\n(clip(decision_function(X), -1, 1) + 1) / 2. 
For other loss functions\nit is necessary to perform proper probability calibration by wrapping\nthe classifier with\n:class:`~sklearn.calibration.CalibratedClassifierCV` instead.", "docstring": "Probability estimates.\n\n This method is only available for log loss and modified Huber loss.\n\n Multiclass probability estimates are derived from binary (one-vs.-rest)\n estimates by simple normalization, as recommended by Zadrozny and\n Elkan.\n\n Binary probability estimates for loss=\"modified_huber\" are given by\n (clip(decision_function(X), -1, 1) + 1) / 2. For other loss functions\n it is necessary to perform proper probability calibration by wrapping\n the classifier with\n :class:`~sklearn.calibration.CalibratedClassifierCV` instead.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n Input data for prediction.\n\n Returns\n -------\n ndarray of shape (n_samples, n_classes)\n Returns the probability of the sample for each class in the model,\n where classes are ordered as they are in `self.classes_`.\n\n References\n ----------\n Zadrozny and Elkan, \"Transforming classifier scores into multiclass\n probability estimates\", SIGKDD'02,\n https://dl.acm.org/doi/pdf/10.1145/775047.775151\n\n The justification for the formula in the loss=\"modified_huber\"\n case is in the appendix B in:\n http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf\n " }, @@ -151660,7 +151087,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDOneClassSVM.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -151673,7 +151100,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDOneClassSVM.__init__.nu", "default_value": "0.5", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.5", "description": "The nu parameter of the One Class SVM: an upper bound on the\nfraction of training errors and a lower bound of the fraction of\nsupport vectors. Should be in the interval (0, 1]. By default 0.5\nwill be taken." @@ -151702,7 +151129,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDOneClassSVM.__init__.fit_intercept", "default_value": "True", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether the intercept should be estimated or not. Defaults to True." @@ -151718,7 +151145,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDOneClassSVM.__init__.max_iter", "default_value": "1000", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=1000", "description": "The maximum number of passes over the training data (aka epochs).\nIt only impacts the behavior in the ``fit`` method, and not the\n`partial_fit`. Defaults to 1000." @@ -151734,7 +151161,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDOneClassSVM.__init__.tol", "default_value": "0.001", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "float or None, default=1e-3", "description": "The stopping criterion. If it is not None, the iterations will stop\nwhen (loss > previous_loss - tol). Defaults to 1e-3." 
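The predict_proba docstring above gives a closed form for binary 'modified_huber' probabilities; a quick check of that formula (the synthetic data is illustrative):

    # P(y=1) == (clip(decision_function(X), -1, 1) + 1) / 2 for binary modified_huber.
    import numpy as np
    from sklearn.datasets import make_classification
    from sklearn.linear_model import SGDClassifier

    X, y = make_classification(n_samples=300, random_state=0)
    clf = SGDClassifier(loss="modified_huber", random_state=0).fit(X, y)
    manual = (np.clip(clf.decision_function(X), -1, 1) + 1) / 2
    print(np.allclose(clf.predict_proba(X)[:, 1], manual))  # True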
@@ -151759,7 +151186,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDOneClassSVM.__init__.shuffle", "default_value": "True", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether or not the training data should be shuffled after each epoch.\nDefaults to True." @@ -151775,7 +151202,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDOneClassSVM.__init__.verbose", "default_value": "0", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=0", "description": "The verbosity level." @@ -151791,7 +151218,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDOneClassSVM.__init__.random_state", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "The seed of the pseudo random number generator to use when shuffling\nthe data. If int, random_state is the seed used by the random number\ngenerator; If RandomState instance, random_state is the random number\ngenerator; If None, the random number generator is the RandomState\ninstance used by `np.random`." @@ -151820,14 +151247,14 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDOneClassSVM.__init__.learning_rate", "default_value": "'optimal'", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{'constant', 'optimal', 'invscaling', 'adaptive'}, default='optimal'", "description": "The learning rate schedule to use with `fit`. (If using `partial_fit`,\nlearning rate must be controlled directly).\n\n- 'constant': `eta = eta0`\n- 'optimal': `eta = 1.0 / (alpha * (t + t0))`\n where t0 is chosen by a heuristic proposed by Leon Bottou.\n- 'invscaling': `eta = eta0 / pow(t, power_t)`\n- 'adaptive': eta = eta0, as long as the training keeps decreasing.\n Each time n_iter_no_change consecutive epochs fail to decrease the\n training loss by tol or fail to increase validation score by tol if\n early_stopping is True, the current learning rate is divided by 5." }, "type": { "kind": "EnumType", - "values": ["adaptive", "invscaling", "constant", "optimal"] + "values": ["invscaling", "optimal", "constant", "adaptive"] } }, { @@ -151836,7 +151263,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDOneClassSVM.__init__.eta0", "default_value": "0.0", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.0", "description": "The initial learning rate for the 'constant', 'invscaling' or\n'adaptive' schedules. The default value is 0.0 as eta0 is not used by\nthe default schedule 'optimal'." @@ -151852,7 +151279,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDOneClassSVM.__init__.power_t", "default_value": "0.5", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.5", "description": "The exponent for inverse scaling learning rate [default 0.5]." 
@@ -151868,7 +151295,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDOneClassSVM.__init__.warm_start", "default_value": "False", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "When set to True, reuse the solution of the previous call to fit as\ninitialization, otherwise, just erase the previous solution.\nSee :term:`the Glossary `.\n\nRepeatedly calling fit or partial_fit when warm_start is True can\nresult in a different solution than when calling fit a single time\nbecause of the way the data is shuffled.\nIf a dynamic learning rate is used, the learning rate is adapted\ndepending on the number of samples already seen. Calling ``fit`` resets\nthis counter, while ``partial_fit`` will result in increasing the\nexisting counter." @@ -151884,7 +151311,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDOneClassSVM.__init__.average", "default_value": "False", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "bool or int, default=False", "description": "When set to True, computes the averaged SGD weights and stores the\nresult in the ``coef_`` attribute. If set to an int greater than 1,\naveraging will begin once the total number of samples seen reaches\naverage. So ``average=10`` will begin averaging after seeing 10\nsamples." @@ -151905,7 +151332,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -152358,7 +151785,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDOneClassSVM.decision_function.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -152371,7 +151798,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDOneClassSVM.decision_function.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix}, shape (n_samples, n_features)", "description": "Testing data." @@ -152392,7 +151819,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Signed distance to the separating hyperplane.\n\nSigned distance is positive for an inlier and negative for an\noutlier.", "docstring": "Signed distance to the separating hyperplane.\n\n Signed distance is positive for an inlier and negative for an\n outlier.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n Testing data.\n\n Returns\n -------\n dec : array-like, shape (n_samples,)\n Decision function values of the samples.\n " }, @@ -152408,7 +151835,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDOneClassSVM.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -152421,7 +151848,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDOneClassSVM.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix}, shape (n_samples, n_features)", "description": "Training data." 
@@ -152446,7 +151873,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDOneClassSVM.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present for API consistency by convention." @@ -152462,7 +151889,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDOneClassSVM.fit.coef_init", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array, shape (n_classes, n_features)", "description": "The initial coefficients to warm-start the optimization." @@ -152487,7 +151914,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDOneClassSVM.fit.offset_init", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array, shape (n_classes,)", "description": "The initial offset to warm-start the optimization." @@ -152512,7 +151939,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDOneClassSVM.fit.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like, shape (n_samples,), optional", "description": "Weights applied to individual samples.\nIf not provided, uniform weights are assumed. These weights will\nbe multiplied with class_weight (passed through the\nconstructor) if class_weight is specified." @@ -152537,7 +151964,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit linear One-Class SVM with Stochastic Gradient Descent.\n\nThis solves an equivalent optimization problem of the\nOne-Class SVM primal optimization problem and returns a weight vector\nw and an offset rho such that the decision function is given by\n - rho.", "docstring": "Fit linear One-Class SVM with Stochastic Gradient Descent.\n\n This solves an equivalent optimization problem of the\n One-Class SVM primal optimization problem and returns a weight vector\n w and an offset rho such that the decision function is given by\n - rho.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n Training data.\n y : Ignored\n Not used, present for API consistency by convention.\n\n coef_init : array, shape (n_classes, n_features)\n The initial coefficients to warm-start the optimization.\n\n offset_init : array, shape (n_classes,)\n The initial offset to warm-start the optimization.\n\n sample_weight : array-like, shape (n_samples,), optional\n Weights applied to individual samples.\n If not provided, uniform weights are assumed. These weights will\n be multiplied with class_weight (passed through the\n constructor) if class_weight is specified.\n\n Returns\n -------\n self : object\n Returns a fitted instance of self.\n " }, @@ -152553,7 +151980,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDOneClassSVM.partial_fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -152566,7 +151993,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDOneClassSVM.partial_fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix}, shape (n_samples, n_features)", "description": "Subset of the training data." 
@@ -152591,7 +152018,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDOneClassSVM.partial_fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present for API consistency by convention." @@ -152607,7 +152034,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDOneClassSVM.partial_fit.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like, shape (n_samples,), optional", "description": "Weights applied to individual samples.\nIf not provided, uniform weights are assumed." @@ -152632,7 +152059,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit linear One-Class SVM with Stochastic Gradient Descent.", "docstring": "Fit linear One-Class SVM with Stochastic Gradient Descent.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n Subset of the training data.\n y : Ignored\n Not used, present for API consistency by convention.\n\n sample_weight : array-like, shape (n_samples,), optional\n Weights applied to individual samples.\n If not provided, uniform weights are assumed.\n\n Returns\n -------\n self : object\n Returns a fitted instance of self.\n " }, @@ -152648,7 +152075,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDOneClassSVM.predict.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -152661,7 +152088,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDOneClassSVM.predict.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix}, shape (n_samples, n_features)", "description": "Testing data." @@ -152682,7 +152109,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Return labels (1 inlier, -1 outlier) of the samples.", "docstring": "Return labels (1 inlier, -1 outlier) of the samples.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n Testing data.\n\n Returns\n -------\n y : array, shape (n_samples,)\n Labels of the samples.\n " }, @@ -152698,7 +152125,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDOneClassSVM.score_samples.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -152711,7 +152138,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDOneClassSVM.score_samples.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix}, shape (n_samples, n_features)", "description": "Testing data." 
@@ -152732,7 +152159,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Raw scoring function of the samples.", "docstring": "Raw scoring function of the samples.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n Testing data.\n\n Returns\n -------\n score_samples : array-like, shape (n_samples,)\n Unshiffted scoring function values of the samples.\n " }, @@ -152748,7 +152175,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDRegressor.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -152761,7 +152188,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDRegressor.__init__.loss", "default_value": "'squared_error'", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "str, default='squared_error'", "description": "The loss function to be used. The possible values are 'squared_error',\n'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'\n\nThe 'squared_error' refers to the ordinary least squares fit.\n'huber' modifies 'squared_error' to focus less on getting outliers\ncorrect by switching from squared to linear loss past a distance of\nepsilon. 'epsilon_insensitive' ignores errors less than epsilon and is\nlinear past that; this is the loss function used in SVR.\n'squared_epsilon_insensitive' is the same but becomes squared loss past\na tolerance of epsilon.\n\nMore details about the losses formulas can be found in the\n:ref:`User Guide `.\n\n.. deprecated:: 1.0\n The loss 'squared_loss' was deprecated in v1.0 and will be removed\n in version 1.2. Use `loss='squared_error'` which is equivalent." @@ -152777,14 +152204,14 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDRegressor.__init__.penalty", "default_value": "'l2'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'l2', 'l1', 'elasticnet'}, default='l2'", "description": "The penalty (aka regularization term) to be used. Defaults to 'l2'\nwhich is the standard regularizer for linear SVM models. 'l1' and\n'elasticnet' might bring sparsity to the model (feature selection)\nnot achievable with 'l2'." }, "type": { "kind": "EnumType", - "values": ["elasticnet", "l2", "l1"] + "values": ["l2", "l1", "elasticnet"] } }, { @@ -152793,7 +152220,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDRegressor.__init__.alpha", "default_value": "0.0001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.0001", "description": "Constant that multiplies the regularization term. The higher the\nvalue, the stronger the regularization.\nAlso used to compute the learning rate when set to `learning_rate` is\nset to 'optimal'." @@ -152809,7 +152236,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDRegressor.__init__.l1_ratio", "default_value": "0.15", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.15", "description": "The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.\nl1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.\nOnly used if `penalty` is 'elasticnet'." 
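A small sketch tying together the SGDOneClassSVM fit / predict / decision_function / score_samples entries above; the data, the nu value, and the score-minus-offset identity checked on the last line are illustrative assumptions, not content of this file.

    # Illustrative only -- not part of the annotation data.
    import numpy as np
    from sklearn.linear_model import SGDOneClassSVM

    rng = np.random.RandomState(0)
    X_train = rng.normal(size=(500, 2))
    X_test = np.vstack([rng.normal(size=(10, 2)), [[6.0, 6.0]]])  # last point sits far from the training data

    oc = SGDOneClassSVM(nu=0.1, random_state=0).fit(X_train)    # y is ignored
    print(oc.predict(X_test))                                    # +1 inlier / -1 outlier labels
    print(oc.decision_function(X_test))                          # signed distance to the hyperplane
    print(np.allclose(oc.decision_function(X_test),
                      oc.score_samples(X_test) - oc.offset_))    # raw scores shifted by the fitted offset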
@@ -152825,7 +152252,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDRegressor.__init__.fit_intercept", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether the intercept should be estimated or not. If False, the\ndata is assumed to be already centered." @@ -152841,7 +152268,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDRegressor.__init__.max_iter", "default_value": "1000", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=1000", "description": "The maximum number of passes over the training data (aka epochs).\nIt only impacts the behavior in the ``fit`` method, and not the\n:meth:`partial_fit` method.\n\n.. versionadded:: 0.19" @@ -152857,7 +152284,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDRegressor.__init__.tol", "default_value": "0.001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-3", "description": "The stopping criterion. If it is not None, training will stop\nwhen (loss > best_loss - tol) for ``n_iter_no_change`` consecutive\nepochs.\nConvergence is checked against the training loss or the\nvalidation loss depending on the `early_stopping` parameter.\n\n.. versionadded:: 0.19" @@ -152873,7 +152300,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDRegressor.__init__.shuffle", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether or not the training data should be shuffled after each epoch." @@ -152889,7 +152316,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDRegressor.__init__.verbose", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=0", "description": "The verbosity level." @@ -152905,7 +152332,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDRegressor.__init__.epsilon", "default_value": "DEFAULT_EPSILON", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.1", "description": "Epsilon in the epsilon-insensitive loss functions; only if `loss` is\n'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.\nFor 'huber', determines the threshold at which it becomes less\nimportant to get the prediction exactly right.\nFor epsilon-insensitive, any differences between the current prediction\nand the correct label are ignored if they are less than this threshold." @@ -152921,7 +152348,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDRegressor.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance, default=None", "description": "Used for shuffling the data, when ``shuffle`` is set to ``True``.\nPass an int for reproducible output across multiple function calls.\nSee :term:`Glossary `." 
@@ -152946,7 +152373,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDRegressor.__init__.learning_rate", "default_value": "'invscaling'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str, default='invscaling'", "description": "The learning rate schedule:\n\n- 'constant': `eta = eta0`\n- 'optimal': `eta = 1.0 / (alpha * (t + t0))`\n where t0 is chosen by a heuristic proposed by Leon Bottou.\n- 'invscaling': `eta = eta0 / pow(t, power_t)`\n- 'adaptive': eta = eta0, as long as the training keeps decreasing.\n Each time n_iter_no_change consecutive epochs fail to decrease the\n training loss by tol or fail to increase validation score by tol if\n early_stopping is True, the current learning rate is divided by 5.\n\n .. versionadded:: 0.20\n Added 'adaptive' option" @@ -152962,7 +152389,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDRegressor.__init__.eta0", "default_value": "0.01", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.01", "description": "The initial learning rate for the 'constant', 'invscaling' or\n'adaptive' schedules. The default value is 0.01." @@ -152978,7 +152405,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDRegressor.__init__.power_t", "default_value": "0.25", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.25", "description": "The exponent for inverse scaling learning rate." @@ -152994,7 +152421,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDRegressor.__init__.early_stopping", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Whether to use early stopping to terminate training when validation\nscore is not improving. If set to True, it will automatically set aside\na fraction of training data as validation and terminate\ntraining when validation score returned by the `score` method is not\nimproving by at least `tol` for `n_iter_no_change` consecutive\nepochs.\n\n.. versionadded:: 0.20\n Added 'early_stopping' option" @@ -153010,7 +152437,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDRegressor.__init__.validation_fraction", "default_value": "0.1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.1", "description": "The proportion of training data to set aside as validation set for\nearly stopping. Must be between 0 and 1.\nOnly used if `early_stopping` is True.\n\n.. versionadded:: 0.20\n Added 'validation_fraction' option" @@ -153026,7 +152453,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDRegressor.__init__.n_iter_no_change", "default_value": "5", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=5", "description": "Number of iterations with no improvement to wait before stopping\nfitting.\nConvergence is checked against the training loss or the\nvalidation loss depending on the `early_stopping` parameter.\n\n.. 
versionadded:: 0.20\n Added 'n_iter_no_change' option" @@ -153042,7 +152469,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDRegressor.__init__.warm_start", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "When set to True, reuse the solution of the previous call to fit as\ninitialization, otherwise, just erase the previous solution.\nSee :term:`the Glossary `.\n\nRepeatedly calling fit or partial_fit when warm_start is True can\nresult in a different solution than when calling fit a single time\nbecause of the way the data is shuffled.\nIf a dynamic learning rate is used, the learning rate is adapted\ndepending on the number of samples already seen. Calling ``fit`` resets\nthis counter, while ``partial_fit`` will result in increasing the\nexisting counter." @@ -153058,7 +152485,7 @@ "qname": "sklearn.linear_model._stochastic_gradient.SGDRegressor.__init__.average", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool or int, default=False", "description": "When set to True, computes the averaged SGD weights across all\nupdates and stores the result in the ``coef_`` attribute. If set to\nan int greater than 1, averaging will begin once the total number of\nsamples seen reaches `average`. So ``average=10`` will begin\naveraging after seeing 10 samples." @@ -153079,7 +152506,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -153581,7 +153008,7 @@ "qname": "sklearn.linear_model._theil_sen.TheilSenRegressor.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -153594,7 +153021,7 @@ "qname": "sklearn.linear_model._theil_sen.TheilSenRegressor.__init__.fit_intercept", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether to calculate the intercept for this model. If set\nto false, no intercept will be used in calculations." @@ -153610,7 +153037,7 @@ "qname": "sklearn.linear_model._theil_sen.TheilSenRegressor.__init__.copy_X", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "If True, X will be copied; else, it may be overwritten." @@ -153626,7 +153053,7 @@ "qname": "sklearn.linear_model._theil_sen.TheilSenRegressor.__init__.max_subpopulation", "default_value": "10000.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=1e4", "description": "Instead of computing with a set of cardinality 'n choose k', where n is\nthe number of samples and k is the number of subsamples (at least\nnumber of features), consider only a stochastic subpopulation of a\ngiven maximal size if 'n choose k' is larger than max_subpopulation.\nFor other than small problem sizes this parameter will determine\nmemory usage and runtime if n_subsamples is not changed. Note that the\ndata type should be int but floats such as 1e4 can be accepted too." 
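The SGDRegressor parameters documented above slot together much like the classifier's; the sketch below uses the 'huber' loss with its epsilon threshold and the default 'invscaling' schedule, with all concrete values chosen arbitrarily.

    # Illustrative only -- not part of the annotation data.
    from sklearn.datasets import make_regression
    from sklearn.linear_model import SGDRegressor
    from sklearn.pipeline import make_pipeline
    from sklearn.preprocessing import StandardScaler

    X, y = make_regression(n_samples=500, n_features=10, noise=5.0, random_state=0)
    reg = make_pipeline(
        StandardScaler(),
        SGDRegressor(loss="huber", epsilon=0.1,      # linear loss beyond the epsilon threshold
                     penalty="l2", alpha=1e-4,
                     learning_rate="invscaling", eta0=0.01, power_t=0.25,
                     max_iter=1000, tol=1e-3, random_state=0),
    )
    reg.fit(X, y)
    print(reg.score(X, y))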
@@ -153642,7 +153069,7 @@ "qname": "sklearn.linear_model._theil_sen.TheilSenRegressor.__init__.n_subsamples", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Number of samples to calculate the parameters. This is at least the\nnumber of features (plus 1 if fit_intercept=True) and the number of\nsamples as a maximum. A lower number leads to a higher breakdown\npoint and a low efficiency while a high number leads to a low\nbreakdown point and a high efficiency. If None, take the\nminimum number of subsamples leading to maximal robustness.\nIf n_subsamples is set to n_samples, Theil-Sen is identical to least\nsquares." @@ -153658,7 +153085,7 @@ "qname": "sklearn.linear_model._theil_sen.TheilSenRegressor.__init__.max_iter", "default_value": "300", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=300", "description": "Maximum number of iterations for the calculation of spatial median." @@ -153674,7 +153101,7 @@ "qname": "sklearn.linear_model._theil_sen.TheilSenRegressor.__init__.tol", "default_value": "0.001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-3", "description": "Tolerance when calculating spatial median." @@ -153690,7 +153117,7 @@ "qname": "sklearn.linear_model._theil_sen.TheilSenRegressor.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "A random number generator instance to define the state of the random\npermutations generator. Pass an int for reproducible output across\nmultiple function calls.\nSee :term:`Glossary `." @@ -153719,7 +153146,7 @@ "qname": "sklearn.linear_model._theil_sen.TheilSenRegressor.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Number of CPUs to use during the cross validation.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details." @@ -153735,7 +153162,7 @@ "qname": "sklearn.linear_model._theil_sen.TheilSenRegressor.__init__.verbose", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Verbose mode when fitting the model." @@ -153747,7 +153174,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -153814,7 +153241,7 @@ "qname": "sklearn.linear_model._theil_sen.TheilSenRegressor.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -153827,7 +153254,7 @@ "qname": "sklearn.linear_model._theil_sen.TheilSenRegressor.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray of shape (n_samples, n_features)", "description": "Training data." @@ -153843,7 +153270,7 @@ "qname": "sklearn.linear_model._theil_sen.TheilSenRegressor.fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray of shape (n_samples,)", "description": "Target values." 
@@ -153855,7 +153282,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit linear model.", "docstring": "Fit linear model.\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Training data.\n y : ndarray of shape (n_samples,)\n Target values.\n\n Returns\n -------\n self : returns an instance of self.\n Fitted `TheilSenRegressor` estimator.\n " }, @@ -154133,7 +153560,7 @@ "qname": "sklearn.manifold._isomap.Isomap.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -154146,7 +153573,7 @@ "qname": "sklearn.manifold._isomap.Isomap.__init__.n_neighbors", "default_value": "5", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or None, default=5", "description": "Number of neighbors to consider for each point. If `n_neighbors` is an int,\nthen `radius` must be `None`." @@ -154171,7 +153598,7 @@ "qname": "sklearn.manifold._isomap.Isomap.__init__.radius", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float or None, default=None", "description": "Limiting distance of neighbors to return. If `radius` is a float,\nthen `n_neighbors` must be set to `None`.\n\n.. versionadded:: 1.1" @@ -154196,7 +153623,7 @@ "qname": "sklearn.manifold._isomap.Isomap.__init__.n_components", "default_value": "2", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=2", "description": "Number of coordinates for the manifold." @@ -154212,14 +153639,14 @@ "qname": "sklearn.manifold._isomap.Isomap.__init__.eigen_solver", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'auto', 'arpack', 'dense'}, default='auto'", "description": "'auto' : Attempt to choose the most efficient solver\nfor the given problem.\n\n'arpack' : Use Arnoldi decomposition to find the eigenvalues\nand eigenvectors.\n\n'dense' : Use a direct solver (i.e. LAPACK)\nfor the eigenvalue decomposition." }, "type": { "kind": "EnumType", - "values": ["auto", "arpack", "dense"] + "values": ["arpack", "auto", "dense"] } }, { @@ -154228,7 +153655,7 @@ "qname": "sklearn.manifold._isomap.Isomap.__init__.tol", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0", "description": "Convergence tolerance passed to arpack or lobpcg.\nnot used if eigen_solver == 'dense'." @@ -154244,7 +153671,7 @@ "qname": "sklearn.manifold._isomap.Isomap.__init__.max_iter", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Maximum number of iterations for the arpack solver.\nnot used if eigen_solver == 'dense'." @@ -154260,14 +153687,14 @@ "qname": "sklearn.manifold._isomap.Isomap.__init__.path_method", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'auto', 'FW', 'D'}, default='auto'", "description": "Method to use in finding shortest path.\n\n'auto' : attempt to choose the best algorithm automatically.\n\n'FW' : Floyd-Warshall algorithm.\n\n'D' : Dijkstra's algorithm." 
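For the TheilSenRegressor entries above, a short robustness sketch; the synthetic data and the fraction of corrupted targets are illustrative.

    # Illustrative only -- not part of the annotation data.
    import numpy as np
    from sklearn.linear_model import TheilSenRegressor

    rng = np.random.RandomState(0)
    X = rng.normal(size=(200, 2))
    y = 3.0 * X[:, 0] - 2.0 * X[:, 1] + rng.normal(scale=0.1, size=200)
    y[:10] += 50.0                                     # a few gross outliers in the targets

    reg = TheilSenRegressor(max_subpopulation=10_000,  # documented default of 1e4
                            n_subsamples=None,         # None -> maximal robustness
                            random_state=0).fit(X, y)
    print(reg.coef_)                                   # should stay close to [3, -2] despite the outliers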
}, "type": { "kind": "EnumType", - "values": ["FW", "auto", "D"] + "values": ["auto", "FW", "D"] } }, { @@ -154276,14 +153703,14 @@ "qname": "sklearn.manifold._isomap.Isomap.__init__.neighbors_algorithm", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'auto', 'brute', 'kd_tree', 'ball_tree'}, default='auto'", "description": "Algorithm to use for nearest neighbors search,\npassed to neighbors.NearestNeighbors instance." }, "type": { "kind": "EnumType", - "values": ["auto", "kd_tree", "ball_tree", "brute"] + "values": ["auto", "kd_tree", "brute", "ball_tree"] } }, { @@ -154292,7 +153719,7 @@ "qname": "sklearn.manifold._isomap.Isomap.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or None, default=None", "description": "The number of parallel jobs to run.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details." @@ -154317,7 +153744,7 @@ "qname": "sklearn.manifold._isomap.Isomap.__init__.metric", "default_value": "'minkowski'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str, or callable, default=\"minkowski\"", "description": "The metric to use when calculating distance between instances in a\nfeature array. If metric is a string or callable, it must be one of\nthe options allowed by :func:`sklearn.metrics.pairwise_distances` for\nits metric parameter.\nIf metric is \"precomputed\", X is assumed to be a distance matrix and\nmust be square. X may be a :term:`Glossary `.\n\n.. versionadded:: 0.22" @@ -154342,7 +153769,7 @@ "qname": "sklearn.manifold._isomap.Isomap.__init__.p", "default_value": "2", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=2", "description": "Parameter for the Minkowski metric from\nsklearn.metrics.pairwise.pairwise_distances. When p = 1, this is\nequivalent to using manhattan_distance (l1), and euclidean_distance\n(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.\n\n.. versionadded:: 0.22" @@ -154358,7 +153785,7 @@ "qname": "sklearn.manifold._isomap.Isomap.__init__.metric_params", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "dict, default=None", "description": "Additional keyword arguments for the metric function.\n\n.. versionadded:: 0.22" @@ -154370,7 +153797,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -154424,7 +153851,7 @@ "qname": "sklearn.manifold._isomap.Isomap.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -154437,7 +153864,7 @@ "qname": "sklearn.manifold._isomap.Isomap.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse graph, BallTree, KDTree, NearestNeighbors}", "description": "Sample data, shape = (n_samples, n_features), in the form of a\nnumpy array, sparse graph, precomputed tree, or NearestNeighbors\nobject." 
@@ -154453,7 +153880,7 @@ "qname": "sklearn.manifold._isomap.Isomap.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present for API consistency by convention." @@ -154465,7 +153892,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Compute the embedding vectors for data X.", "docstring": "Compute the embedding vectors for data X.\n\n Parameters\n ----------\n X : {array-like, sparse graph, BallTree, KDTree, NearestNeighbors}\n Sample data, shape = (n_samples, n_features), in the form of a\n numpy array, sparse graph, precomputed tree, or NearestNeighbors\n object.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n self : object\n Returns a fitted instance of self.\n " }, @@ -154481,7 +153908,7 @@ "qname": "sklearn.manifold._isomap.Isomap.fit_transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -154494,7 +153921,7 @@ "qname": "sklearn.manifold._isomap.Isomap.fit_transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse graph, BallTree, KDTree}", "description": "Training vector, where `n_samples` is the number of samples\nand `n_features` is the number of features." @@ -154510,7 +153937,7 @@ "qname": "sklearn.manifold._isomap.Isomap.fit_transform.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present for API consistency by convention." 
@@ -154522,7 +153949,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the model from data in X and transform X.", "docstring": "Fit the model from data in X and transform X.\n\n Parameters\n ----------\n X : {array-like, sparse graph, BallTree, KDTree}\n Training vector, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n X_new : array-like, shape (n_samples, n_components)\n X transformed in the new space.\n " }, @@ -154538,7 +153965,7 @@ "qname": "sklearn.manifold._isomap.Isomap.reconstruction_error.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -154547,7 +153974,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Compute the reconstruction error for the embedding.", "docstring": "Compute the reconstruction error for the embedding.\n\n Returns\n -------\n reconstruction_error : float\n Reconstruction error.\n\n Notes\n -----\n The cost function of an isomap embedding is\n\n ``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``\n\n Where D is the matrix of distances for the input data X,\n D_fit is the matrix of distances for the output embedding X_fit,\n and K is the isomap kernel:\n\n ``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``\n " }, @@ -154563,7 +153990,7 @@ "qname": "sklearn.manifold._isomap.Isomap.transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -154576,7 +154003,7 @@ "qname": "sklearn.manifold._isomap.Isomap.transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like, shape (n_queries, n_features)", "description": "If neighbors_algorithm='precomputed', X is assumed to be a\ndistance matrix or a sparse graph of shape\n(n_queries, n_samples_fit)." @@ -154597,7 +154024,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Transform X.\n\nThis is implemented by linking the points X into the graph of geodesic\ndistances of the training data. First the `n_neighbors` nearest\nneighbors of X are found in the training data, and from these the\nshortest geodesic distances from each point in X to each point in\nthe training data are computed in order to construct the kernel.\nThe embedding of X is the projection of this kernel onto the\nembedding vectors of the training set.", "docstring": "Transform X.\n\n This is implemented by linking the points X into the graph of geodesic\n distances of the training data. 
First the `n_neighbors` nearest\n neighbors of X are found in the training data, and from these the\n shortest geodesic distances from each point in X to each point in\n the training data are computed in order to construct the kernel.\n The embedding of X is the projection of this kernel onto the\n embedding vectors of the training set.\n\n Parameters\n ----------\n X : array-like, shape (n_queries, n_features)\n If neighbors_algorithm='precomputed', X is assumed to be a\n distance matrix or a sparse graph of shape\n (n_queries, n_samples_fit).\n\n Returns\n -------\n X_new : array-like, shape (n_queries, n_components)\n X transformed in the new space.\n " }, @@ -154613,7 +154040,7 @@ "qname": "sklearn.manifold._locally_linear.LocallyLinearEmbedding.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -154626,7 +154053,7 @@ "qname": "sklearn.manifold._locally_linear.LocallyLinearEmbedding.__init__.n_neighbors", "default_value": "5", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=5", "description": "Number of neighbors to consider for each point." @@ -154642,7 +154069,7 @@ "qname": "sklearn.manifold._locally_linear.LocallyLinearEmbedding.__init__.n_components", "default_value": "2", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=2", "description": "Number of coordinates for the manifold." @@ -154658,7 +154085,7 @@ "qname": "sklearn.manifold._locally_linear.LocallyLinearEmbedding.__init__.reg", "default_value": "0.001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-3", "description": "Regularization constant, multiplies the trace of the local covariance\nmatrix of the distances." @@ -154674,14 +154101,14 @@ "qname": "sklearn.manifold._locally_linear.LocallyLinearEmbedding.__init__.eigen_solver", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'auto', 'arpack', 'dense'}, default='auto'", "description": "The solver used to compute the eigenvectors. The available options are:\n\n- `'auto'` : algorithm will attempt to choose the best method for input\n data.\n- `'arpack'` : use arnoldi iteration in shift-invert mode. For this\n method, M may be a dense matrix, sparse matrix, or general linear\n operator.\n- `'dense'` : use standard dense matrix operations for the eigenvalue\n decomposition. For this method, M must be an array or matrix type.\n This method should be avoided for large problems.\n\n.. warning::\n ARPACK can be unstable for some problems. It is best to try several\n random seeds in order to check results." }, "type": { "kind": "EnumType", - "values": ["auto", "arpack", "dense"] + "values": ["arpack", "auto", "dense"] } }, { @@ -154690,7 +154117,7 @@ "qname": "sklearn.manifold._locally_linear.LocallyLinearEmbedding.__init__.tol", "default_value": "1e-06", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-6", "description": "Tolerance for 'arpack' method\nNot used if eigen_solver=='dense'." 
@@ -154706,7 +154133,7 @@ "qname": "sklearn.manifold._locally_linear.LocallyLinearEmbedding.__init__.max_iter", "default_value": "100", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=100", "description": "Maximum number of iterations for the arpack solver.\nNot used if eigen_solver=='dense'." @@ -154722,14 +154149,14 @@ "qname": "sklearn.manifold._locally_linear.LocallyLinearEmbedding.__init__.method", "default_value": "'standard'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'standard', 'hessian', 'modified', 'ltsa'}, default='standard'", "description": "- `standard`: use the standard locally linear embedding algorithm. see\n reference [1]_\n- `hessian`: use the Hessian eigenmap method. This method requires\n ``n_neighbors > n_components * (1 + (n_components + 1) / 2``. see\n reference [2]_\n- `modified`: use the modified locally linear embedding algorithm.\n see reference [3]_\n- `ltsa`: use local tangent space alignment algorithm. see\n reference [4]_" }, "type": { "kind": "EnumType", - "values": ["modified", "hessian", "standard", "ltsa"] + "values": ["ltsa", "standard", "hessian", "modified"] } }, { @@ -154738,7 +154165,7 @@ "qname": "sklearn.manifold._locally_linear.LocallyLinearEmbedding.__init__.hessian_tol", "default_value": "0.0001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-4", "description": "Tolerance for Hessian eigenmapping method.\nOnly used if ``method == 'hessian'``." @@ -154754,7 +154181,7 @@ "qname": "sklearn.manifold._locally_linear.LocallyLinearEmbedding.__init__.modified_tol", "default_value": "1e-12", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-12", "description": "Tolerance for modified LLE method.\nOnly used if ``method == 'modified'``." @@ -154770,14 +154197,14 @@ "qname": "sklearn.manifold._locally_linear.LocallyLinearEmbedding.__init__.neighbors_algorithm", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'auto', 'brute', 'kd_tree', 'ball_tree'}, default='auto'", "description": "Algorithm to use for nearest neighbors search, passed to\n:class:`~sklearn.neighbors.NearestNeighbors` instance." }, "type": { "kind": "EnumType", - "values": ["auto", "kd_tree", "ball_tree", "brute"] + "values": ["auto", "kd_tree", "brute", "ball_tree"] } }, { @@ -154786,7 +154213,7 @@ "qname": "sklearn.manifold._locally_linear.LocallyLinearEmbedding.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance, default=None", "description": "Determines the random number generator when\n``eigen_solver`` == 'arpack'. Pass an int for reproducible results\nacross multiple function calls. See :term:`Glossary `." @@ -154811,7 +154238,7 @@ "qname": "sklearn.manifold._locally_linear.LocallyLinearEmbedding.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or None, default=None", "description": "The number of parallel jobs to run.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details." 
@@ -154832,7 +154259,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -154886,7 +154313,7 @@ "qname": "sklearn.manifold._locally_linear.LocallyLinearEmbedding.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -154899,7 +154326,7 @@ "qname": "sklearn.manifold._locally_linear.LocallyLinearEmbedding.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Training set." @@ -154915,7 +154342,7 @@ "qname": "sklearn.manifold._locally_linear.LocallyLinearEmbedding.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present here for API consistency by convention." @@ -154927,7 +154354,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Compute the embedding vectors for data X.", "docstring": "Compute the embedding vectors for data X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training set.\n\n y : Ignored\n Not used, present here for API consistency by convention.\n\n Returns\n -------\n self : object\n Fitted `LocallyLinearEmbedding` class instance.\n " }, @@ -154943,7 +154370,7 @@ "qname": "sklearn.manifold._locally_linear.LocallyLinearEmbedding.fit_transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -154956,7 +154383,7 @@ "qname": "sklearn.manifold._locally_linear.LocallyLinearEmbedding.fit_transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Training set." @@ -154972,7 +154399,7 @@ "qname": "sklearn.manifold._locally_linear.LocallyLinearEmbedding.fit_transform.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present here for API consistency by convention." @@ -154984,7 +154411,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Compute the embedding vectors for data X and transform X.", "docstring": "Compute the embedding vectors for data X and transform X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training set.\n\n y : Ignored\n Not used, present here for API consistency by convention.\n\n Returns\n -------\n X_new : array-like, shape (n_samples, n_components)\n Returns the instance itself.\n " }, @@ -155000,7 +154427,7 @@ "qname": "sklearn.manifold._locally_linear.LocallyLinearEmbedding.transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -155013,7 +154440,7 @@ "qname": "sklearn.manifold._locally_linear.LocallyLinearEmbedding.transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Training set." 
@@ -155025,7 +154452,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Transform new points into embedding space.", "docstring": "\n Transform new points into embedding space.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training set.\n\n Returns\n -------\n X_new : ndarray of shape (n_samples, n_components)\n Returns the instance itself.\n\n Notes\n -----\n Because of scaling performed by this method, it is discouraged to use\n it together with methods that are not scale-invariant (like SVMs).\n " }, @@ -155300,7 +154727,7 @@ }, "type": { "kind": "EnumType", - "values": ["auto", "arpack", "dense"] + "values": ["arpack", "auto", "dense"] } }, { @@ -155348,7 +154775,7 @@ }, "type": { "kind": "EnumType", - "values": ["modified", "hessian", "standard", "ltsa"] + "values": ["ltsa", "standard", "hessian", "modified"] } }, { @@ -155506,7 +154933,7 @@ }, "type": { "kind": "EnumType", - "values": ["auto", "arpack", "dense"] + "values": ["arpack", "auto", "dense"] } }, { @@ -155584,7 +155011,7 @@ "qname": "sklearn.manifold._mds.MDS.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -155597,7 +155024,7 @@ "qname": "sklearn.manifold._mds.MDS.__init__.n_components", "default_value": "2", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=2", "description": "Number of dimensions in which to immerse the dissimilarities." @@ -155613,7 +155040,7 @@ "qname": "sklearn.manifold._mds.MDS.__init__.metric", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "If ``True``, perform metric MDS; otherwise, perform nonmetric MDS." @@ -155629,7 +155056,7 @@ "qname": "sklearn.manifold._mds.MDS.__init__.n_init", "default_value": "4", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=4", "description": "Number of times the SMACOF algorithm will be run with different\ninitializations. The final results will be the best output of the runs,\ndetermined by the run with the smallest final stress." @@ -155645,7 +155072,7 @@ "qname": "sklearn.manifold._mds.MDS.__init__.max_iter", "default_value": "300", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=300", "description": "Maximum number of iterations of the SMACOF algorithm for a single run." @@ -155661,7 +155088,7 @@ "qname": "sklearn.manifold._mds.MDS.__init__.verbose", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=0", "description": "Level of verbosity." @@ -155677,7 +155104,7 @@ "qname": "sklearn.manifold._mds.MDS.__init__.eps", "default_value": "0.001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-3", "description": "Relative tolerance with respect to stress at which to declare\nconvergence." @@ -155693,7 +155120,7 @@ "qname": "sklearn.manifold._mds.MDS.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "The number of jobs to use for the computation. 
If multiple\ninitializations are used (``n_init``), each run of the algorithm is\ncomputed in parallel.\n\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details." @@ -155709,7 +155136,7 @@ "qname": "sklearn.manifold._mds.MDS.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Determines the random number generator used to initialize the centers.\nPass an int for reproducible results across multiple function calls.\nSee :term:`Glossary `." @@ -155738,7 +155165,7 @@ "qname": "sklearn.manifold._mds.MDS.__init__.dissimilarity", "default_value": "'euclidean'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'euclidean', 'precomputed'}, default='euclidean'", "description": "Dissimilarity measure to use:\n\n- 'euclidean':\n Pairwise Euclidean distances between points in the dataset.\n\n- 'precomputed':\n Pre-computed dissimilarities are passed directly to ``fit`` and\n ``fit_transform``." @@ -155750,7 +155177,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -155791,7 +155218,7 @@ "qname": "sklearn.manifold._mds.MDS.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -155804,7 +155231,7 @@ "qname": "sklearn.manifold._mds.MDS.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features) or (n_samples, n_samples)", "description": "Input data. If ``dissimilarity=='precomputed'``, the input should\nbe the dissimilarity matrix." @@ -155820,7 +155247,7 @@ "qname": "sklearn.manifold._mds.MDS.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present for API consistency by convention." @@ -155836,7 +155263,7 @@ "qname": "sklearn.manifold._mds.MDS.fit.init", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray of shape (n_samples,), default=None", "description": "Starting configuration of the embedding to initialize the SMACOF\nalgorithm. By default, the algorithm is initialized with a randomly\nchosen array." @@ -155848,7 +155275,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Compute the position of the points in the embedding space.", "docstring": "\n Compute the position of the points in the embedding space.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features) or (n_samples, n_samples)\n Input data. If ``dissimilarity=='precomputed'``, the input should\n be the dissimilarity matrix.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n init : ndarray of shape (n_samples,), default=None\n Starting configuration of the embedding to initialize the SMACOF\n algorithm. 
By default, the algorithm is initialized with a randomly\n chosen array.\n\n Returns\n -------\n self : object\n Fitted estimator.\n " }, @@ -155864,7 +155291,7 @@ "qname": "sklearn.manifold._mds.MDS.fit_transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -155877,7 +155304,7 @@ "qname": "sklearn.manifold._mds.MDS.fit_transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features) or (n_samples, n_samples)", "description": "Input data. If ``dissimilarity=='precomputed'``, the input should\nbe the dissimilarity matrix." @@ -155893,7 +155320,7 @@ "qname": "sklearn.manifold._mds.MDS.fit_transform.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present for API consistency by convention." @@ -155909,7 +155336,7 @@ "qname": "sklearn.manifold._mds.MDS.fit_transform.init", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray of shape (n_samples,), default=None", "description": "Starting configuration of the embedding to initialize the SMACOF\nalgorithm. By default, the algorithm is initialized with a randomly\nchosen array." @@ -155921,7 +155348,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the data from `X`, and returns the embedded coordinates.", "docstring": "\n Fit the data from `X`, and returns the embedded coordinates.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features) or (n_samples, n_samples)\n Input data. If ``dissimilarity=='precomputed'``, the input should\n be the dissimilarity matrix.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n init : ndarray of shape (n_samples,), default=None\n Starting configuration of the embedding to initialize the SMACOF\n algorithm. By default, the algorithm is initialized with a randomly\n chosen array.\n\n Returns\n -------\n X_new : ndarray of shape (n_samples, n_components)\n X transformed in the new space.\n " }, @@ -156291,7 +155718,7 @@ "qname": "sklearn.manifold._spectral_embedding.SpectralEmbedding.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -156304,7 +155731,7 @@ "qname": "sklearn.manifold._spectral_embedding.SpectralEmbedding.__init__.n_components", "default_value": "2", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=2", "description": "The dimension of the projected subspace." 
@@ -156320,7 +155747,7 @@ "qname": "sklearn.manifold._spectral_embedding.SpectralEmbedding.__init__.affinity", "default_value": "'nearest_neighbors'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'nearest_neighbors', 'rbf', 'precomputed', 'precomputed_nearest_neighbors'} or callable, default='nearest_neighbors'", "description": "How to construct the affinity matrix.\n - 'nearest_neighbors' : construct the affinity matrix by computing a\n graph of nearest neighbors.\n - 'rbf' : construct the affinity matrix by computing a radial basis\n function (RBF) kernel.\n - 'precomputed' : interpret ``X`` as a precomputed affinity matrix.\n - 'precomputed_nearest_neighbors' : interpret ``X`` as a sparse graph\n of precomputed nearest neighbors, and constructs the affinity matrix\n by selecting the ``n_neighbors`` nearest neighbors.\n - callable : use passed in function as affinity\n the function takes in data matrix (n_samples, n_features)\n and return affinity matrix (n_samples, n_samples)." @@ -156330,7 +155757,7 @@ "types": [ { "kind": "EnumType", - "values": ["nearest_neighbors", "precomputed_nearest_neighbors", "precomputed", "rbf"] + "values": ["nearest_neighbors", "precomputed", "rbf", "precomputed_nearest_neighbors"] }, { "kind": "NamedType", @@ -156345,7 +155772,7 @@ "qname": "sklearn.manifold._spectral_embedding.SpectralEmbedding.__init__.gamma", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=None", "description": "Kernel coefficient for rbf kernel. If None, gamma will be set to\n1/n_features." @@ -156361,7 +155788,7 @@ "qname": "sklearn.manifold._spectral_embedding.SpectralEmbedding.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "A pseudo random number generator used for the initialization\nof the lobpcg eigen vectors decomposition when `eigen_solver ==\n'amg'`, and for the K-Means initialization. Use an int to make\nthe results deterministic across calls (See\n:term:`Glossary `).\n\n.. note::\n When using `eigen_solver == 'amg'`,\n it is necessary to also fix the global numpy seed with\n `np.random.seed(int)` to get deterministic results. See\n https://github.com/pyamg/pyamg/issues/139 for further\n information." @@ -156390,14 +155817,14 @@ "qname": "sklearn.manifold._spectral_embedding.SpectralEmbedding.__init__.eigen_solver", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'arpack', 'lobpcg', 'amg'}, default=None", "description": "The eigenvalue decomposition strategy to use. AMG requires pyamg\nto be installed. It can be faster on very large, sparse problems.\nIf None, then ``'arpack'`` is used." }, "type": { "kind": "EnumType", - "values": ["arpack", "lobpcg", "amg"] + "values": ["arpack", "amg", "lobpcg"] } }, { @@ -156406,7 +155833,7 @@ "qname": "sklearn.manifold._spectral_embedding.SpectralEmbedding.__init__.n_neighbors", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Number of nearest neighbors for nearest_neighbors graph building.\nIf None, n_neighbors will be set to max(n_samples/10, 1)." 
@@ -156422,7 +155849,7 @@ "qname": "sklearn.manifold._spectral_embedding.SpectralEmbedding.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "The number of parallel jobs to run.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details." @@ -156434,7 +155861,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -156526,7 +155953,7 @@ "qname": "sklearn.manifold._spectral_embedding.SpectralEmbedding.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -156539,7 +155966,7 @@ "qname": "sklearn.manifold._spectral_embedding.SpectralEmbedding.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Training vector, where `n_samples` is the number of samples\nand `n_features` is the number of features.\n\nIf affinity is \"precomputed\"\nX : {array-like, sparse matrix}, shape (n_samples, n_samples),\nInterpret X as precomputed adjacency graph computed from\nsamples." @@ -156564,7 +155991,7 @@ "qname": "sklearn.manifold._spectral_embedding.SpectralEmbedding.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present for API consistency by convention." @@ -156576,7 +156003,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the model from data in X.", "docstring": "Fit the model from data in X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training vector, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n If affinity is \"precomputed\"\n X : {array-like, sparse matrix}, shape (n_samples, n_samples),\n Interpret X as precomputed adjacency graph computed from\n samples.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n self : object\n Returns the instance itself.\n " }, @@ -156592,7 +156019,7 @@ "qname": "sklearn.manifold._spectral_embedding.SpectralEmbedding.fit_transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -156605,7 +156032,7 @@ "qname": "sklearn.manifold._spectral_embedding.SpectralEmbedding.fit_transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Training vector, where `n_samples` is the number of samples\nand `n_features` is the number of features.\n\nIf affinity is \"precomputed\"\nX : {array-like, sparse matrix} of shape (n_samples, n_samples),\nInterpret X as precomputed adjacency graph computed from\nsamples." @@ -156630,7 +156057,7 @@ "qname": "sklearn.manifold._spectral_embedding.SpectralEmbedding.fit_transform.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present for API consistency by convention." 
@@ -156642,7 +156069,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the model from data in X and transform X.", "docstring": "Fit the model from data in X and transform X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training vector, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n If affinity is \"precomputed\"\n X : {array-like, sparse matrix} of shape (n_samples, n_samples),\n Interpret X as precomputed adjacency graph computed from\n samples.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n X_new : array-like of shape (n_samples, n_components)\n Spectral embedding of the training matrix.\n " }, @@ -156847,7 +156274,7 @@ }, "type": { "kind": "EnumType", - "values": ["arpack", "lobpcg", "amg"] + "values": ["arpack", "amg", "lobpcg"] } }, { @@ -156945,7 +156372,7 @@ "qname": "sklearn.manifold._t_sne.TSNE.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -156958,7 +156385,7 @@ "qname": "sklearn.manifold._t_sne.TSNE.__init__.n_components", "default_value": "2", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=2", "description": "Dimension of the embedded space." @@ -156974,7 +156401,7 @@ "qname": "sklearn.manifold._t_sne.TSNE.__init__.perplexity", "default_value": "30.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=30.0", "description": "The perplexity is related to the number of nearest neighbors that\nis used in other manifold learning algorithms. Larger datasets\nusually require a larger perplexity. Consider selecting a value\nbetween 5 and 50. Different values can result in significantly\ndifferent results." @@ -156990,7 +156417,7 @@ "qname": "sklearn.manifold._t_sne.TSNE.__init__.early_exaggeration", "default_value": "12.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=12.0", "description": "Controls how tight natural clusters in the original space are in\nthe embedded space and how much space will be between them. For\nlarger values, the space between natural clusters will be larger\nin the embedded space. Again, the choice of this parameter is not\nvery critical. If the cost function increases during initial\noptimization, the early exaggeration factor or the learning rate\nmight be too high." @@ -157006,7 +156433,7 @@ "qname": "sklearn.manifold._t_sne.TSNE.__init__.learning_rate", "default_value": "'warn'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float or 'auto', default=200.0", "description": "The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If\nthe learning rate is too high, the data may look like a 'ball' with any\npoint approximately equidistant from its nearest neighbours. If the\nlearning rate is too low, most points may look compressed in a dense\ncloud with few outliers. If the cost function gets stuck in a bad local\nminimum increasing the learning rate may help.\nNote that many other t-SNE implementations (bhtsne, FIt-SNE, openTSNE,\netc.) use a definition of learning_rate that is 4 times smaller than\nours. So our learning_rate=200 corresponds to learning_rate=800 in\nthose other implementations. 
The 'auto' option sets the learning_rate\nto `max(N / early_exaggeration / 4, 50)` where N is the sample size,\nfollowing [4] and [5]. This will become default in 1.2." @@ -157039,7 +156466,7 @@ "qname": "sklearn.manifold._t_sne.TSNE.__init__.n_iter", "default_value": "1000", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=1000", "description": "Maximum number of iterations for the optimization. Should be at\nleast 250." @@ -157055,7 +156482,7 @@ "qname": "sklearn.manifold._t_sne.TSNE.__init__.n_iter_without_progress", "default_value": "300", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=300", "description": "Maximum number of iterations without progress before we abort the\noptimization, used after 250 initial iterations with early\nexaggeration. Note that progress is only checked every 50 iterations so\nthis value is rounded to the next multiple of 50.\n\n.. versionadded:: 0.17\n parameter *n_iter_without_progress* to control stopping criteria." @@ -157071,7 +156498,7 @@ "qname": "sklearn.manifold._t_sne.TSNE.__init__.min_grad_norm", "default_value": "1e-07", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-7", "description": "If the gradient norm is below this threshold, the optimization will\nbe stopped." @@ -157087,7 +156514,7 @@ "qname": "sklearn.manifold._t_sne.TSNE.__init__.metric", "default_value": "'euclidean'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str or callable, default='euclidean'", "description": "The metric to use when calculating distance between instances in a\nfeature array. If metric is a string, it must be one of the options\nallowed by scipy.spatial.distance.pdist for its metric parameter, or\na metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.\nIf metric is \"precomputed\", X is assumed to be a distance matrix.\nAlternatively, if metric is a callable function, it is called on each\npair of instances (rows) and the resulting value recorded. The callable\nshould take two arrays from X as input and return a value indicating\nthe distance between them. The default is \"euclidean\" which is\ninterpreted as squared euclidean distance." @@ -157112,7 +156539,7 @@ "qname": "sklearn.manifold._t_sne.TSNE.__init__.metric_params", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "dict, default=None", "description": "Additional keyword arguments for the metric function.\n\n.. versionadded:: 1.1" @@ -157128,7 +156555,7 @@ "qname": "sklearn.manifold._t_sne.TSNE.__init__.init", "default_value": "'warn'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'random', 'pca'} or ndarray of shape (n_samples, n_components), default='random'", "description": "Initialization of embedding. Possible options are 'random', 'pca',\nand a numpy array of shape (n_samples, n_components).\nPCA initialization cannot be used with precomputed distances and is\nusually more globally stable than random initialization. `init='pca'`\nwill become default in 1.2." 
@@ -157138,7 +156565,7 @@ "types": [ { "kind": "EnumType", - "values": ["pca", "random"] + "values": ["random", "pca"] }, { "kind": "NamedType", @@ -157153,7 +156580,7 @@ "qname": "sklearn.manifold._t_sne.TSNE.__init__.verbose", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=0", "description": "Verbosity level." @@ -157169,7 +156596,7 @@ "qname": "sklearn.manifold._t_sne.TSNE.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Determines the random number generator. Pass an int for reproducible\nresults across multiple function calls. Note that different\ninitializations might result in different local minima of the cost\nfunction. See :term:`Glossary `." @@ -157198,7 +156625,7 @@ "qname": "sklearn.manifold._t_sne.TSNE.__init__.method", "default_value": "'barnes_hut'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str, default='barnes_hut'", "description": "By default the gradient calculation algorithm uses Barnes-Hut\napproximation running in O(NlogN) time. method='exact'\nwill run on the slower, but exact, algorithm in O(N^2) time. The\nexact algorithm should be used when nearest-neighbor errors need\nto be better than 3%. However, the exact method cannot scale to\nmillions of examples.\n\n.. versionadded:: 0.17\n Approximate optimization *method* via the Barnes-Hut." @@ -157214,7 +156641,7 @@ "qname": "sklearn.manifold._t_sne.TSNE.__init__.angle", "default_value": "0.5", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.5", "description": "Only used if method='barnes_hut'\nThis is the trade-off between speed and accuracy for Barnes-Hut T-SNE.\n'angle' is the angular size (referred to as theta in [3]) of a distant\nnode as measured from a point. If this size is below 'angle' then it is\nused as a summary node of all points contained within it.\nThis method is not very sensitive to changes in this parameter\nin the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing\ncomputation time and angle greater 0.8 has quickly increasing error." @@ -157230,7 +156657,7 @@ "qname": "sklearn.manifold._t_sne.TSNE.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "The number of parallel jobs to run for neighbors search. This parameter\nhas no impact when ``metric=\"precomputed\"`` or\n(``metric=\"euclidean\"`` and ``method=\"exact\"``).\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details.\n\n.. versionadded:: 0.22" @@ -157246,7 +156673,7 @@ "qname": "sklearn.manifold._t_sne.TSNE.__init__.square_distances", "default_value": "'deprecated'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "True, default='deprecated'", "description": "This parameter has no effect since distance values are always squared\nsince 1.1.\n\n.. deprecated:: 1.1\n `square_distances` has no effect from 1.1 and will be removed in\n 1.3." 
@@ -157258,7 +156685,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -157453,7 +156880,7 @@ "qname": "sklearn.manifold._t_sne.TSNE.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -157466,7 +156893,7 @@ "qname": "sklearn.manifold._t_sne.TSNE.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray of shape (n_samples, n_features) or (n_samples, n_samples)", "description": "If the metric is 'precomputed' X must be a square distance\nmatrix. Otherwise it contains a sample per row. If the method\nis 'exact', X may be a sparse matrix of type 'csr', 'csc'\nor 'coo'. If the method is 'barnes_hut' and the metric is\n'precomputed', X may be a precomputed sparse graph." @@ -157482,7 +156909,7 @@ "qname": "sklearn.manifold._t_sne.TSNE.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "None", "description": "Ignored." @@ -157494,7 +156921,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit X into an embedded space.", "docstring": "Fit X into an embedded space.\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features) or (n_samples, n_samples)\n If the metric is 'precomputed' X must be a square distance\n matrix. Otherwise it contains a sample per row. If the method\n is 'exact', X may be a sparse matrix of type 'csr', 'csc'\n or 'coo'. If the method is 'barnes_hut' and the metric is\n 'precomputed', X may be a precomputed sparse graph.\n\n y : None\n Ignored.\n\n Returns\n -------\n X_new : array of shape (n_samples, n_components)\n Embedding of the training data in low-dimensional space.\n " }, @@ -157510,7 +156937,7 @@ "qname": "sklearn.manifold._t_sne.TSNE.fit_transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -157523,7 +156950,7 @@ "qname": "sklearn.manifold._t_sne.TSNE.fit_transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray of shape (n_samples, n_features) or (n_samples, n_samples)", "description": "If the metric is 'precomputed' X must be a square distance\nmatrix. Otherwise it contains a sample per row. If the method\nis 'exact', X may be a sparse matrix of type 'csr', 'csc'\nor 'coo'. If the method is 'barnes_hut' and the metric is\n'precomputed', X may be a precomputed sparse graph." @@ -157539,7 +156966,7 @@ "qname": "sklearn.manifold._t_sne.TSNE.fit_transform.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "None", "description": "Ignored." @@ -157551,7 +156978,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit X into an embedded space and return that transformed output.", "docstring": "Fit X into an embedded space and return that transformed output.\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features) or (n_samples, n_samples)\n If the metric is 'precomputed' X must be a square distance\n matrix. Otherwise it contains a sample per row. If the method\n is 'exact', X may be a sparse matrix of type 'csr', 'csc'\n or 'coo'. 
If the method is 'barnes_hut' and the metric is\n 'precomputed', X may be a precomputed sparse graph.\n\n y : None\n Ignored.\n\n Returns\n -------\n X_new : ndarray of shape (n_samples, n_components)\n Embedding of the training data in low-dimensional space.\n " }, @@ -158431,7 +157858,7 @@ }, "type": { "kind": "EnumType", - "values": ["macro", "samples", "weighted", "micro"] + "values": ["macro", "micro", "samples", "weighted"] } }, { @@ -159208,116 +158635,6 @@ "description": "Compute the Brier score loss.\n\nThe smaller the Brier score loss, the better, hence the naming with \"loss\".\nThe Brier score measures the mean squared difference between the predicted\nprobability and the actual outcome. The Brier score always\ntakes on a value between zero and one, since this is the largest\npossible difference between a predicted probability (which must be\nbetween zero and one) and the actual outcome (which can take on values\nof only 0 and 1). It can be decomposed is the sum of refinement loss and\ncalibration loss.\n\nThe Brier score is appropriate for binary and categorical outcomes that\ncan be structured as true or false, but is inappropriate for ordinal\nvariables which can take on three or more values (this is because the\nBrier score assumes that all possible outcomes are equivalently\n\"distant\" from one another). Which label is considered to be the positive\nlabel is controlled via the parameter `pos_label`, which defaults to\nthe greater label unless `y_true` is all 0 or all -1, in which case\n`pos_label` defaults to 1.\n\nRead more in the :ref:`User Guide `.", "docstring": "Compute the Brier score loss.\n\n The smaller the Brier score loss, the better, hence the naming with \"loss\".\n The Brier score measures the mean squared difference between the predicted\n probability and the actual outcome. The Brier score always\n takes on a value between zero and one, since this is the largest\n possible difference between a predicted probability (which must be\n between zero and one) and the actual outcome (which can take on values\n of only 0 and 1). It can be decomposed is the sum of refinement loss and\n calibration loss.\n\n The Brier score is appropriate for binary and categorical outcomes that\n can be structured as true or false, but is inappropriate for ordinal\n variables which can take on three or more values (this is because the\n Brier score assumes that all possible outcomes are equivalently\n \"distant\" from one another). Which label is considered to be the positive\n label is controlled via the parameter `pos_label`, which defaults to\n the greater label unless `y_true` is all 0 or all -1, in which case\n `pos_label` defaults to 1.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n y_true : array of shape (n_samples,)\n True targets.\n\n y_prob : array of shape (n_samples,)\n Probabilities of the positive class.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights.\n\n pos_label : int or str, default=None\n Label of the positive class. `pos_label` will be inferred in the\n following manner:\n\n * if `y_true` in {-1, 1} or {0, 1}, `pos_label` defaults to 1;\n * else if `y_true` contains string, an error will be raised and\n `pos_label` should be explicitly specified;\n * otherwise, `pos_label` defaults to the greater label,\n i.e. 
`np.unique(y_true)[-1]`.\n\n Returns\n -------\n score : float\n Brier score loss.\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.metrics import brier_score_loss\n >>> y_true = np.array([0, 1, 1, 0])\n >>> y_true_categorical = np.array([\"spam\", \"ham\", \"ham\", \"spam\"])\n >>> y_prob = np.array([0.1, 0.9, 0.8, 0.3])\n >>> brier_score_loss(y_true, y_prob)\n 0.037...\n >>> brier_score_loss(y_true, 1-y_prob, pos_label=0)\n 0.037...\n >>> brier_score_loss(y_true_categorical, y_prob, pos_label=\"ham\")\n 0.037...\n >>> brier_score_loss(y_true, np.array(y_prob) > 0.5)\n 0.0\n\n References\n ----------\n .. [1] `Wikipedia entry for the Brier score\n `_.\n " }, - { - "id": "sklearn/sklearn.metrics._classification/class_likelihood_ratios", - "name": "class_likelihood_ratios", - "qname": "sklearn.metrics._classification.class_likelihood_ratios", - "decorators": [], - "parameters": [ - { - "id": "sklearn/sklearn.metrics._classification/class_likelihood_ratios/y_true", - "name": "y_true", - "qname": "sklearn.metrics._classification.class_likelihood_ratios.y_true", - "default_value": null, - "assigned_by": "POSITION_OR_NAME", - "is_public": true, - "docstring": { - "type": "1d array-like, or label indicator array / sparse matrix", - "description": "Ground truth (correct) target values." - }, - "type": { - "kind": "UnionType", - "types": [ - { - "kind": "NamedType", - "name": "1d array-like" - }, - { - "kind": "NamedType", - "name": "label indicator array / sparse matrix" - } - ] - } - }, - { - "id": "sklearn/sklearn.metrics._classification/class_likelihood_ratios/y_pred", - "name": "y_pred", - "qname": "sklearn.metrics._classification.class_likelihood_ratios.y_pred", - "default_value": null, - "assigned_by": "POSITION_OR_NAME", - "is_public": true, - "docstring": { - "type": "1d array-like, or label indicator array / sparse matrix", - "description": "Estimated targets as returned by a classifier." - }, - "type": { - "kind": "UnionType", - "types": [ - { - "kind": "NamedType", - "name": "1d array-like" - }, - { - "kind": "NamedType", - "name": "label indicator array / sparse matrix" - } - ] - } - }, - { - "id": "sklearn/sklearn.metrics._classification/class_likelihood_ratios/labels", - "name": "labels", - "qname": "sklearn.metrics._classification.class_likelihood_ratios.labels", - "default_value": "None", - "assigned_by": "NAME_ONLY", - "is_public": true, - "docstring": { - "type": "array-like, default=None", - "description": "List of labels to index the matrix. This may be used to select the\npositive and negative classes with the ordering `labels=[negative_class,\npositive_class]`. If `None` is given, those that appear at least once in\n`y_true` or `y_pred` are used in sorted order." - }, - "type": { - "kind": "NamedType", - "name": "array-like" - } - }, - { - "id": "sklearn/sklearn.metrics._classification/class_likelihood_ratios/sample_weight", - "name": "sample_weight", - "qname": "sklearn.metrics._classification.class_likelihood_ratios.sample_weight", - "default_value": "None", - "assigned_by": "NAME_ONLY", - "is_public": true, - "docstring": { - "type": "array-like of shape (n_samples,), default=None", - "description": "Sample weights." 
- }, - "type": { - "kind": "NamedType", - "name": "array-like of shape (n_samples,)" - } - }, - { - "id": "sklearn/sklearn.metrics._classification/class_likelihood_ratios/raise_warning", - "name": "raise_warning", - "qname": "sklearn.metrics._classification.class_likelihood_ratios.raise_warning", - "default_value": "True", - "assigned_by": "NAME_ONLY", - "is_public": true, - "docstring": { - "type": "bool, default=True", - "description": "Whether or not a case-specific warning message is raised when there is a\nzero division. Even if the error is not raised, the function will return\nnan in such cases." - }, - "type": { - "kind": "NamedType", - "name": "bool" - } - } - ], - "results": [], - "is_public": true, - "description": "Compute binary classification positive and negative likelihood ratios.\n\nThe positive likelihood ratio is `LR+ = sensitivity / (1 - specificity)`\nwhere the sensitivity or recall is the ratio `tp / (tp + fn)` and the\nspecificity is `tn / (tn + fp)`. The negative likelihood ratio is `LR- = (1\n- sensitivity) / specificity`. Here `tp` is the number of true positives,\n`fp` the number of false positives, `tn` is the number of true negatives and\n`fn` the number of false negatives. Both class likelihood ratios can be used\nto obtain post-test probabilities given a pre-test probability.\n\n`LR+` ranges from 1 to infinity. A `LR+` of 1 indicates that the probability\nof predicting the positive class is the same for samples belonging to either\nclass; therefore, the test is useless. The greater `LR+` is, the more a\npositive prediction is likely to be a true positive when compared with the\npre-test probability. A value of `LR+` lower than 1 is invalid as it would\nindicate that the odds of a sample being a true positive decrease with\nrespect to the pre-test odds.\n\n`LR-` ranges from 0 to 1. The closer it is to 0, the lower the probability\nof a given sample to be a false negative. A `LR-` of 1 means the test is\nuseless because the odds of having the condition did not change after the\ntest. A value of `LR-` greater than 1 invalidates the classifier as it\nindicates an increase in the odds of a sample belonging to the positive\nclass after being classified as negative. This is the case when the\nclassifier systematically predicts the opposite of the true label.\n\nA typical application in medicine is to identify the positive/negative class\nto the presence/absence of a disease, respectively; the classifier being a\ndiagnostic test; the pre-test probability of an individual having the\ndisease can be the prevalence of such disease (proportion of a particular\npopulation found to be affected by a medical condition); and the post-test\nprobabilities would be the probability that the condition is truly present\ngiven a positive test result.\n\nRead more in the :ref:`User Guide `.", - "docstring": "Compute binary classification positive and negative likelihood ratios.\n\n The positive likelihood ratio is `LR+ = sensitivity / (1 - specificity)`\n where the sensitivity or recall is the ratio `tp / (tp + fn)` and the\n specificity is `tn / (tn + fp)`. The negative likelihood ratio is `LR- = (1\n - sensitivity) / specificity`. Here `tp` is the number of true positives,\n `fp` the number of false positives, `tn` is the number of true negatives and\n `fn` the number of false negatives. Both class likelihood ratios can be used\n to obtain post-test probabilities given a pre-test probability.\n\n `LR+` ranges from 1 to infinity. 
A `LR+` of 1 indicates that the probability\n of predicting the positive class is the same for samples belonging to either\n class; therefore, the test is useless. The greater `LR+` is, the more a\n positive prediction is likely to be a true positive when compared with the\n pre-test probability. A value of `LR+` lower than 1 is invalid as it would\n indicate that the odds of a sample being a true positive decrease with\n respect to the pre-test odds.\n\n `LR-` ranges from 0 to 1. The closer it is to 0, the lower the probability\n of a given sample to be a false negative. A `LR-` of 1 means the test is\n useless because the odds of having the condition did not change after the\n test. A value of `LR-` greater than 1 invalidates the classifier as it\n indicates an increase in the odds of a sample belonging to the positive\n class after being classified as negative. This is the case when the\n classifier systematically predicts the opposite of the true label.\n\n A typical application in medicine is to identify the positive/negative class\n to the presence/absence of a disease, respectively; the classifier being a\n diagnostic test; the pre-test probability of an individual having the\n disease can be the prevalence of such disease (proportion of a particular\n population found to be affected by a medical condition); and the post-test\n probabilities would be the probability that the condition is truly present\n given a positive test result.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n y_true : 1d array-like, or label indicator array / sparse matrix\n Ground truth (correct) target values.\n\n y_pred : 1d array-like, or label indicator array / sparse matrix\n Estimated targets as returned by a classifier.\n\n labels : array-like, default=None\n List of labels to index the matrix. This may be used to select the\n positive and negative classes with the ordering `labels=[negative_class,\n positive_class]`. If `None` is given, those that appear at least once in\n `y_true` or `y_pred` are used in sorted order.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights.\n\n raise_warning : bool, default=True\n Whether or not a case-specific warning message is raised when there is a\n zero division. Even if the error is not raised, the function will return\n nan in such cases.\n\n Returns\n -------\n (positive_likelihood_ratio, negative_likelihood_ratio) : tuple\n A tuple of two float, the first containing the Positive likelihood ratio\n and the second the Negative likelihood ratio.\n\n Warns\n -----\n When `false positive == 0`, the positive likelihood ratio is undefined.\n When `true negative == 0`, the negative likelihood ratio is undefined.\n When `true positive + false negative == 0` both ratios are undefined.\n In such cases, `UserWarning` will be raised if raise_warning=True.\n\n References\n ----------\n .. 
[1] `Wikipedia entry for the Likelihood ratios in diagnostic testing\n `_.\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.metrics import class_likelihood_ratios\n >>> class_likelihood_ratios([0, 1, 0, 1, 0], [1, 1, 0, 0, 0])\n (1.5, 0.75)\n >>> y_true = np.array([\"non-cat\", \"cat\", \"non-cat\", \"cat\", \"non-cat\"])\n >>> y_pred = np.array([\"cat\", \"cat\", \"non-cat\", \"non-cat\", \"non-cat\"])\n >>> class_likelihood_ratios(y_true, y_pred)\n (1.33..., 0.66...)\n >>> y_true = np.array([\"non-zebra\", \"zebra\", \"non-zebra\", \"zebra\", \"non-zebra\"])\n >>> y_pred = np.array([\"zebra\", \"zebra\", \"non-zebra\", \"non-zebra\", \"non-zebra\"])\n >>> class_likelihood_ratios(y_true, y_pred)\n (1.5, 0.75)\n\n To avoid ambiguities, use the notation `labels=[negative_class,\n positive_class]`\n\n >>> y_true = np.array([\"non-cat\", \"cat\", \"non-cat\", \"cat\", \"non-cat\"])\n >>> y_pred = np.array([\"cat\", \"cat\", \"non-cat\", \"non-cat\", \"non-cat\"])\n >>> class_likelihood_ratios(y_true, y_pred, labels=[\"non-cat\", \"cat\"])\n (1.5, 0.75)\n " - }, { "id": "sklearn/sklearn.metrics._classification/classification_report", "name": "classification_report", @@ -159556,7 +158873,7 @@ }, "type": { "kind": "EnumType", - "values": ["quadratic", "linear"] + "values": ["linear", "quadratic"] } }, { @@ -159664,7 +158981,7 @@ }, "type": { "kind": "EnumType", - "values": ["true", "all", "pred"] + "values": ["all", "pred", "true"] } } ], @@ -159786,7 +159103,7 @@ "types": [ { "kind": "EnumType", - "values": ["weighted", "binary", "micro", "macro", "samples"] + "values": ["macro", "binary", "micro", "samples", "weighted"] }, { "kind": "NamedType", @@ -159975,7 +159292,7 @@ "types": [ { "kind": "EnumType", - "values": ["weighted", "binary", "micro", "macro", "samples"] + "values": ["macro", "binary", "micro", "samples", "weighted"] }, { "kind": "NamedType", @@ -160302,7 +159619,7 @@ "types": [ { "kind": "EnumType", - "values": ["weighted", "binary", "micro", "macro", "samples"] + "values": ["macro", "binary", "micro", "samples", "weighted"] }, { "kind": "NamedType", @@ -160798,7 +160115,7 @@ }, "type": { "kind": "EnumType", - "values": ["samples", "weighted", "micro", "macro", "binary"] + "values": ["macro", "binary", "micro", "samples", "weighted"] } }, { @@ -160994,7 +160311,7 @@ "types": [ { "kind": "EnumType", - "values": ["weighted", "binary", "micro", "macro", "samples"] + "values": ["macro", "binary", "micro", "samples", "weighted"] }, { "kind": "NamedType", @@ -161167,7 +160484,7 @@ "types": [ { "kind": "EnumType", - "values": ["weighted", "binary", "micro", "macro", "samples"] + "values": ["macro", "binary", "micro", "samples", "weighted"] }, { "kind": "NamedType", @@ -161462,7 +160779,7 @@ "qname": "sklearn.metrics._plot.confusion_matrix.ConfusionMatrixDisplay.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -161475,7 +160792,7 @@ "qname": "sklearn.metrics._plot.confusion_matrix.ConfusionMatrixDisplay.__init__.confusion_matrix", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray of shape (n_classes, n_classes)", "description": "Confusion matrix." 
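The `class_likelihood_ratios` entry above defines `LR+ = sensitivity / (1 - specificity)` and `LR- = (1 - sensitivity) / specificity` and quotes the result `(1.5, 0.75)` for a small toy input. As an illustrative sketch only (assuming NumPy and scikit-learn are installed; the `class_likelihood_ratios` function itself is not present in older releases), the same numbers can be reproduced directly from a confusion matrix:

import numpy as np
from sklearn.metrics import confusion_matrix

y_true = np.array([0, 1, 0, 1, 0])
y_pred = np.array([1, 1, 0, 0, 0])

# With labels=[0, 1] the flattened confusion matrix is (tn, fp, fn, tp).
tn, fp, fn, tp = confusion_matrix(y_true, y_pred, labels=[0, 1]).ravel()
sensitivity = tp / (tp + fn)                 # recall of the positive class
specificity = tn / (tn + fp)
lr_plus = sensitivity / (1 - specificity)    # 1.5 for this toy data
lr_minus = (1 - sensitivity) / specificity   # 0.75 for this toy data
print(lr_plus, lr_minus)

This matches the docstring example above; the library function additionally handles the zero-division cases described under "Warns" by returning nan.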
@@ -161491,7 +160808,7 @@ "qname": "sklearn.metrics._plot.confusion_matrix.ConfusionMatrixDisplay.__init__.display_labels", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray of shape (n_classes,), default=None", "description": "Display labels for plot. If None, display labels are set from 0 to\n`n_classes - 1`." @@ -161503,7 +160820,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -161519,7 +160836,7 @@ "qname": "sklearn.metrics._plot.confusion_matrix.ConfusionMatrixDisplay.from_estimator.cls", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -161532,7 +160849,7 @@ "qname": "sklearn.metrics._plot.confusion_matrix.ConfusionMatrixDisplay.from_estimator.estimator", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "estimator instance", "description": "Fitted classifier or a fitted :class:`~sklearn.pipeline.Pipeline`\nin which the last estimator is a classifier." @@ -161548,7 +160865,7 @@ "qname": "sklearn.metrics._plot.confusion_matrix.ConfusionMatrixDisplay.from_estimator.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Input values." @@ -161573,7 +160890,7 @@ "qname": "sklearn.metrics._plot.confusion_matrix.ConfusionMatrixDisplay.from_estimator.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "Target values." @@ -161589,7 +160906,7 @@ "qname": "sklearn.metrics._plot.confusion_matrix.ConfusionMatrixDisplay.from_estimator.labels", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_classes,), default=None", "description": "List of labels to index the confusion matrix. This may be used to\nreorder or select a subset of labels. If `None` is given, those\nthat appear at least once in `y_true` or `y_pred` are used in\nsorted order." @@ -161605,7 +160922,7 @@ "qname": "sklearn.metrics._plot.confusion_matrix.ConfusionMatrixDisplay.from_estimator.sample_weight", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,), default=None", "description": "Sample weights." @@ -161621,14 +160938,14 @@ "qname": "sklearn.metrics._plot.confusion_matrix.ConfusionMatrixDisplay.from_estimator.normalize", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'true', 'pred', 'all'}, default=None", "description": "Either to normalize the counts display in the matrix:\n\n- if `'true'`, the confusion matrix is normalized over the true\n conditions (e.g. rows);\n- if `'pred'`, the confusion matrix is normalized over the\n predicted conditions (e.g. columns);\n- if `'all'`, the confusion matrix is normalized by the total\n number of samples;\n- if `None` (default), the confusion matrix will not be normalized." 
}, "type": { "kind": "EnumType", - "values": ["true", "all", "pred"] + "values": ["all", "pred", "true"] } }, { @@ -161637,7 +160954,7 @@ "qname": "sklearn.metrics._plot.confusion_matrix.ConfusionMatrixDisplay.from_estimator.display_labels", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_classes,), default=None", "description": "Target names used for plotting. By default, `labels` will be used\nif it is defined, otherwise the unique labels of `y_true` and\n`y_pred` will be used." @@ -161653,7 +160970,7 @@ "qname": "sklearn.metrics._plot.confusion_matrix.ConfusionMatrixDisplay.from_estimator.include_values", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Includes values in confusion matrix." @@ -161669,7 +160986,7 @@ "qname": "sklearn.metrics._plot.confusion_matrix.ConfusionMatrixDisplay.from_estimator.xticks_rotation", "default_value": "'horizontal'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'vertical', 'horizontal'} or float, default='horizontal'", "description": "Rotation of xtick labels." @@ -161694,7 +161011,7 @@ "qname": "sklearn.metrics._plot.confusion_matrix.ConfusionMatrixDisplay.from_estimator.values_format", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str, default=None", "description": "Format specification for values in confusion matrix. If `None`, the\nformat specification is 'd' or '.2g' whichever is shorter." @@ -161710,7 +161027,7 @@ "qname": "sklearn.metrics._plot.confusion_matrix.ConfusionMatrixDisplay.from_estimator.cmap", "default_value": "'viridis'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str or matplotlib Colormap, default='viridis'", "description": "Colormap recognized by matplotlib." @@ -161735,7 +161052,7 @@ "qname": "sklearn.metrics._plot.confusion_matrix.ConfusionMatrixDisplay.from_estimator.ax", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "matplotlib Axes, default=None", "description": "Axes object to plot on. If `None`, a new figure and axes is\ncreated." @@ -161751,7 +161068,7 @@ "qname": "sklearn.metrics._plot.confusion_matrix.ConfusionMatrixDisplay.from_estimator.colorbar", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether or not to add a colorbar to the plot." @@ -161767,7 +161084,7 @@ "qname": "sklearn.metrics._plot.confusion_matrix.ConfusionMatrixDisplay.from_estimator.im_kw", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "dict, default=None", "description": "Dict with keywords passed to `matplotlib.pyplot.imshow` call." @@ -161779,7 +161096,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Plot Confusion Matrix given an estimator and some data.\n\nRead more in the :ref:`User Guide `.\n\n.. versionadded:: 1.0", "docstring": "Plot Confusion Matrix given an estimator and some data.\n\n Read more in the :ref:`User Guide `.\n\n .. 
versionadded:: 1.0\n\n Parameters\n ----------\n estimator : estimator instance\n Fitted classifier or a fitted :class:`~sklearn.pipeline.Pipeline`\n in which the last estimator is a classifier.\n\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Input values.\n\n y : array-like of shape (n_samples,)\n Target values.\n\n labels : array-like of shape (n_classes,), default=None\n List of labels to index the confusion matrix. This may be used to\n reorder or select a subset of labels. If `None` is given, those\n that appear at least once in `y_true` or `y_pred` are used in\n sorted order.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights.\n\n normalize : {'true', 'pred', 'all'}, default=None\n Either to normalize the counts display in the matrix:\n\n - if `'true'`, the confusion matrix is normalized over the true\n conditions (e.g. rows);\n - if `'pred'`, the confusion matrix is normalized over the\n predicted conditions (e.g. columns);\n - if `'all'`, the confusion matrix is normalized by the total\n number of samples;\n - if `None` (default), the confusion matrix will not be normalized.\n\n display_labels : array-like of shape (n_classes,), default=None\n Target names used for plotting. By default, `labels` will be used\n if it is defined, otherwise the unique labels of `y_true` and\n `y_pred` will be used.\n\n include_values : bool, default=True\n Includes values in confusion matrix.\n\n xticks_rotation : {'vertical', 'horizontal'} or float, default='horizontal'\n Rotation of xtick labels.\n\n values_format : str, default=None\n Format specification for values in confusion matrix. If `None`, the\n format specification is 'd' or '.2g' whichever is shorter.\n\n cmap : str or matplotlib Colormap, default='viridis'\n Colormap recognized by matplotlib.\n\n ax : matplotlib Axes, default=None\n Axes object to plot on. If `None`, a new figure and axes is\n created.\n\n colorbar : bool, default=True\n Whether or not to add a colorbar to the plot.\n\n im_kw : dict, default=None\n Dict with keywords passed to `matplotlib.pyplot.imshow` call.\n\n Returns\n -------\n display : :class:`~sklearn.metrics.ConfusionMatrixDisplay`\n\n See Also\n --------\n ConfusionMatrixDisplay.from_predictions : Plot the confusion matrix\n given the true and predicted labels.\n\n Examples\n --------\n >>> import matplotlib.pyplot as plt\n >>> from sklearn.datasets import make_classification\n >>> from sklearn.metrics import ConfusionMatrixDisplay\n >>> from sklearn.model_selection import train_test_split\n >>> from sklearn.svm import SVC\n >>> X, y = make_classification(random_state=0)\n >>> X_train, X_test, y_train, y_test = train_test_split(\n ... X, y, random_state=0)\n >>> clf = SVC(random_state=0)\n >>> clf.fit(X_train, y_train)\n SVC(random_state=0)\n >>> ConfusionMatrixDisplay.from_estimator(\n ... clf, X_test, y_test)\n <...>\n >>> plt.show()\n " }, @@ -161795,7 +161112,7 @@ "qname": "sklearn.metrics._plot.confusion_matrix.ConfusionMatrixDisplay.from_predictions.cls", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -161808,7 +161125,7 @@ "qname": "sklearn.metrics._plot.confusion_matrix.ConfusionMatrixDisplay.from_predictions.y_true", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "True labels." 
@@ -161824,7 +161141,7 @@ "qname": "sklearn.metrics._plot.confusion_matrix.ConfusionMatrixDisplay.from_predictions.y_pred", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "The predicted labels given by the method `predict` of an\nclassifier." @@ -161840,7 +161157,7 @@ "qname": "sklearn.metrics._plot.confusion_matrix.ConfusionMatrixDisplay.from_predictions.labels", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_classes,), default=None", "description": "List of labels to index the confusion matrix. This may be used to\nreorder or select a subset of labels. If `None` is given, those\nthat appear at least once in `y_true` or `y_pred` are used in\nsorted order." @@ -161856,7 +161173,7 @@ "qname": "sklearn.metrics._plot.confusion_matrix.ConfusionMatrixDisplay.from_predictions.sample_weight", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,), default=None", "description": "Sample weights." @@ -161872,14 +161189,14 @@ "qname": "sklearn.metrics._plot.confusion_matrix.ConfusionMatrixDisplay.from_predictions.normalize", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'true', 'pred', 'all'}, default=None", "description": "Either to normalize the counts display in the matrix:\n\n- if `'true'`, the confusion matrix is normalized over the true\n conditions (e.g. rows);\n- if `'pred'`, the confusion matrix is normalized over the\n predicted conditions (e.g. columns);\n- if `'all'`, the confusion matrix is normalized by the total\n number of samples;\n- if `None` (default), the confusion matrix will not be normalized." }, "type": { "kind": "EnumType", - "values": ["true", "all", "pred"] + "values": ["all", "pred", "true"] } }, { @@ -161888,7 +161205,7 @@ "qname": "sklearn.metrics._plot.confusion_matrix.ConfusionMatrixDisplay.from_predictions.display_labels", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_classes,), default=None", "description": "Target names used for plotting. By default, `labels` will be used\nif it is defined, otherwise the unique labels of `y_true` and\n`y_pred` will be used." @@ -161904,7 +161221,7 @@ "qname": "sklearn.metrics._plot.confusion_matrix.ConfusionMatrixDisplay.from_predictions.include_values", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Includes values in confusion matrix." @@ -161920,7 +161237,7 @@ "qname": "sklearn.metrics._plot.confusion_matrix.ConfusionMatrixDisplay.from_predictions.xticks_rotation", "default_value": "'horizontal'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'vertical', 'horizontal'} or float, default='horizontal'", "description": "Rotation of xtick labels." @@ -161945,7 +161262,7 @@ "qname": "sklearn.metrics._plot.confusion_matrix.ConfusionMatrixDisplay.from_predictions.values_format", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str, default=None", "description": "Format specification for values in confusion matrix. 
If `None`, the\nformat specification is 'd' or '.2g' whichever is shorter." @@ -161961,7 +161278,7 @@ "qname": "sklearn.metrics._plot.confusion_matrix.ConfusionMatrixDisplay.from_predictions.cmap", "default_value": "'viridis'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str or matplotlib Colormap, default='viridis'", "description": "Colormap recognized by matplotlib." @@ -161986,7 +161303,7 @@ "qname": "sklearn.metrics._plot.confusion_matrix.ConfusionMatrixDisplay.from_predictions.ax", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "matplotlib Axes, default=None", "description": "Axes object to plot on. If `None`, a new figure and axes is\ncreated." @@ -162002,7 +161319,7 @@ "qname": "sklearn.metrics._plot.confusion_matrix.ConfusionMatrixDisplay.from_predictions.colorbar", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether or not to add a colorbar to the plot." @@ -162018,7 +161335,7 @@ "qname": "sklearn.metrics._plot.confusion_matrix.ConfusionMatrixDisplay.from_predictions.im_kw", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "dict, default=None", "description": "Dict with keywords passed to `matplotlib.pyplot.imshow` call." @@ -162030,7 +161347,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Plot Confusion Matrix given true and predicted labels.\n\nRead more in the :ref:`User Guide `.\n\n.. versionadded:: 1.0", "docstring": "Plot Confusion Matrix given true and predicted labels.\n\n Read more in the :ref:`User Guide `.\n\n .. versionadded:: 1.0\n\n Parameters\n ----------\n y_true : array-like of shape (n_samples,)\n True labels.\n\n y_pred : array-like of shape (n_samples,)\n The predicted labels given by the method `predict` of an\n classifier.\n\n labels : array-like of shape (n_classes,), default=None\n List of labels to index the confusion matrix. This may be used to\n reorder or select a subset of labels. If `None` is given, those\n that appear at least once in `y_true` or `y_pred` are used in\n sorted order.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights.\n\n normalize : {'true', 'pred', 'all'}, default=None\n Either to normalize the counts display in the matrix:\n\n - if `'true'`, the confusion matrix is normalized over the true\n conditions (e.g. rows);\n - if `'pred'`, the confusion matrix is normalized over the\n predicted conditions (e.g. columns);\n - if `'all'`, the confusion matrix is normalized by the total\n number of samples;\n - if `None` (default), the confusion matrix will not be normalized.\n\n display_labels : array-like of shape (n_classes,), default=None\n Target names used for plotting. By default, `labels` will be used\n if it is defined, otherwise the unique labels of `y_true` and\n `y_pred` will be used.\n\n include_values : bool, default=True\n Includes values in confusion matrix.\n\n xticks_rotation : {'vertical', 'horizontal'} or float, default='horizontal'\n Rotation of xtick labels.\n\n values_format : str, default=None\n Format specification for values in confusion matrix. 
If `None`, the\n format specification is 'd' or '.2g' whichever is shorter.\n\n cmap : str or matplotlib Colormap, default='viridis'\n Colormap recognized by matplotlib.\n\n ax : matplotlib Axes, default=None\n Axes object to plot on. If `None`, a new figure and axes is\n created.\n\n colorbar : bool, default=True\n Whether or not to add a colorbar to the plot.\n\n im_kw : dict, default=None\n Dict with keywords passed to `matplotlib.pyplot.imshow` call.\n\n Returns\n -------\n display : :class:`~sklearn.metrics.ConfusionMatrixDisplay`\n\n See Also\n --------\n ConfusionMatrixDisplay.from_estimator : Plot the confusion matrix\n given an estimator, the data, and the label.\n\n Examples\n --------\n >>> import matplotlib.pyplot as plt\n >>> from sklearn.datasets import make_classification\n >>> from sklearn.metrics import ConfusionMatrixDisplay\n >>> from sklearn.model_selection import train_test_split\n >>> from sklearn.svm import SVC\n >>> X, y = make_classification(random_state=0)\n >>> X_train, X_test, y_train, y_test = train_test_split(\n ... X, y, random_state=0)\n >>> clf = SVC(random_state=0)\n >>> clf.fit(X_train, y_train)\n SVC(random_state=0)\n >>> y_pred = clf.predict(X_test)\n >>> ConfusionMatrixDisplay.from_predictions(\n ... y_test, y_pred)\n <...>\n >>> plt.show()\n " }, @@ -162046,7 +161363,7 @@ "qname": "sklearn.metrics._plot.confusion_matrix.ConfusionMatrixDisplay.plot.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -162059,7 +161376,7 @@ "qname": "sklearn.metrics._plot.confusion_matrix.ConfusionMatrixDisplay.plot.include_values", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Includes values in confusion matrix." @@ -162075,7 +161392,7 @@ "qname": "sklearn.metrics._plot.confusion_matrix.ConfusionMatrixDisplay.plot.cmap", "default_value": "'viridis'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str or matplotlib Colormap, default='viridis'", "description": "Colormap recognized by matplotlib." @@ -162100,7 +161417,7 @@ "qname": "sklearn.metrics._plot.confusion_matrix.ConfusionMatrixDisplay.plot.xticks_rotation", "default_value": "'horizontal'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'vertical', 'horizontal'} or float, default='horizontal'", "description": "Rotation of xtick labels." @@ -162125,7 +161442,7 @@ "qname": "sklearn.metrics._plot.confusion_matrix.ConfusionMatrixDisplay.plot.values_format", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str, default=None", "description": "Format specification for values in confusion matrix. If `None`,\nthe format specification is 'd' or '.2g' whichever is shorter." @@ -162141,7 +161458,7 @@ "qname": "sklearn.metrics._plot.confusion_matrix.ConfusionMatrixDisplay.plot.ax", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "matplotlib axes, default=None", "description": "Axes object to plot on. If `None`, a new figure and axes is\ncreated." 
@@ -162157,7 +161474,7 @@ "qname": "sklearn.metrics._plot.confusion_matrix.ConfusionMatrixDisplay.plot.colorbar", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether or not to add a colorbar to the plot." @@ -162173,7 +161490,7 @@ "qname": "sklearn.metrics._plot.confusion_matrix.ConfusionMatrixDisplay.plot.im_kw", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "dict, default=None", "description": "Dict with keywords passed to `matplotlib.pyplot.imshow` call." @@ -162185,7 +161502,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Plot visualization.", "docstring": "Plot visualization.\n\n Parameters\n ----------\n include_values : bool, default=True\n Includes values in confusion matrix.\n\n cmap : str or matplotlib Colormap, default='viridis'\n Colormap recognized by matplotlib.\n\n xticks_rotation : {'vertical', 'horizontal'} or float, default='horizontal'\n Rotation of xtick labels.\n\n values_format : str, default=None\n Format specification for values in confusion matrix. If `None`,\n the format specification is 'd' or '.2g' whichever is shorter.\n\n ax : matplotlib axes, default=None\n Axes object to plot on. If `None`, a new figure and axes is\n created.\n\n colorbar : bool, default=True\n Whether or not to add a colorbar to the plot.\n\n im_kw : dict, default=None\n Dict with keywords passed to `matplotlib.pyplot.imshow` call.\n\n Returns\n -------\n display : :class:`~sklearn.metrics.ConfusionMatrixDisplay`\n " }, @@ -162299,7 +161616,7 @@ }, "type": { "kind": "EnumType", - "values": ["true", "all", "pred"] + "values": ["all", "pred", "true"] } }, { @@ -162450,7 +161767,7 @@ "qname": "sklearn.metrics._plot.det_curve.DetCurveDisplay.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -162463,7 +161780,7 @@ "qname": "sklearn.metrics._plot.det_curve.DetCurveDisplay.__init__.fpr", "default_value": null, "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray", "description": "False positive rate." @@ -162479,7 +161796,7 @@ "qname": "sklearn.metrics._plot.det_curve.DetCurveDisplay.__init__.fnr", "default_value": null, "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray", "description": "False negative rate." @@ -162495,7 +161812,7 @@ "qname": "sklearn.metrics._plot.det_curve.DetCurveDisplay.__init__.estimator_name", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str, default=None", "description": "Name of estimator. If None, the estimator name is not shown." @@ -162511,7 +161828,7 @@ "qname": "sklearn.metrics._plot.det_curve.DetCurveDisplay.__init__.pos_label", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str or int, default=None", "description": "The label of the positive class." 
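The `ConfusionMatrixDisplay` entries above (now marked public) cover `from_estimator`, `from_predictions`, and `plot`. A minimal usage sketch, assuming scikit-learn >= 1.0 and matplotlib are available:

import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split

X, y = make_classification(random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression().fit(X_train, y_train)

# normalize="true" divides each row by the number of samples with that true label.
ConfusionMatrixDisplay.from_estimator(clf, X_test, y_test, normalize="true")
# Equivalent plot built from precomputed predictions instead of an estimator.
ConfusionMatrixDisplay.from_predictions(y_test, clf.predict(X_test), normalize="true")
plt.show()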
@@ -162532,7 +161849,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -162548,7 +161865,7 @@ "qname": "sklearn.metrics._plot.det_curve.DetCurveDisplay.from_estimator.cls", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -162561,7 +161878,7 @@ "qname": "sklearn.metrics._plot.det_curve.DetCurveDisplay.from_estimator.estimator", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "estimator instance", "description": "Fitted classifier or a fitted :class:`~sklearn.pipeline.Pipeline`\nin which the last estimator is a classifier." @@ -162577,7 +161894,7 @@ "qname": "sklearn.metrics._plot.det_curve.DetCurveDisplay.from_estimator.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Input values." @@ -162602,7 +161919,7 @@ "qname": "sklearn.metrics._plot.det_curve.DetCurveDisplay.from_estimator.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "Target values." @@ -162618,7 +161935,7 @@ "qname": "sklearn.metrics._plot.det_curve.DetCurveDisplay.from_estimator.sample_weight", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,), default=None", "description": "Sample weights." @@ -162634,14 +161951,14 @@ "qname": "sklearn.metrics._plot.det_curve.DetCurveDisplay.from_estimator.response_method", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'predict_proba', 'decision_function', 'auto'} default='auto'", "description": "Specifies whether to use :term:`predict_proba` or\n:term:`decision_function` as the predicted target response. If set\nto 'auto', :term:`predict_proba` is tried first and if it does not\nexist :term:`decision_function` is tried next." }, "type": { "kind": "EnumType", - "values": ["decision_function", "auto", "predict_proba"] + "values": ["auto", "predict_proba", "decision_function"] } }, { @@ -162650,7 +161967,7 @@ "qname": "sklearn.metrics._plot.det_curve.DetCurveDisplay.from_estimator.pos_label", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str or int, default=None", "description": "The label of the positive class. When `pos_label=None`, if `y_true`\nis in {-1, 1} or {0, 1}, `pos_label` is set to 1, otherwise an\nerror will be raised." @@ -162675,7 +161992,7 @@ "qname": "sklearn.metrics._plot.det_curve.DetCurveDisplay.from_estimator.name", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str, default=None", "description": "Name of DET curve for labeling. If `None`, use the name of the\nestimator." @@ -162691,7 +162008,7 @@ "qname": "sklearn.metrics._plot.det_curve.DetCurveDisplay.from_estimator.ax", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "matplotlib axes, default=None", "description": "Axes object to plot on. If `None`, a new figure and axes is\ncreated." 
@@ -162703,7 +162020,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Plot DET curve given an estimator and data.\n\nRead more in the :ref:`User Guide `.\n\n.. versionadded:: 1.0", "docstring": "Plot DET curve given an estimator and data.\n\n Read more in the :ref:`User Guide `.\n\n .. versionadded:: 1.0\n\n Parameters\n ----------\n estimator : estimator instance\n Fitted classifier or a fitted :class:`~sklearn.pipeline.Pipeline`\n in which the last estimator is a classifier.\n\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Input values.\n\n y : array-like of shape (n_samples,)\n Target values.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights.\n\n response_method : {'predict_proba', 'decision_function', 'auto'} default='auto'\n Specifies whether to use :term:`predict_proba` or\n :term:`decision_function` as the predicted target response. If set\n to 'auto', :term:`predict_proba` is tried first and if it does not\n exist :term:`decision_function` is tried next.\n\n pos_label : str or int, default=None\n The label of the positive class. When `pos_label=None`, if `y_true`\n is in {-1, 1} or {0, 1}, `pos_label` is set to 1, otherwise an\n error will be raised.\n\n name : str, default=None\n Name of DET curve for labeling. If `None`, use the name of the\n estimator.\n\n ax : matplotlib axes, default=None\n Axes object to plot on. If `None`, a new figure and axes is\n created.\n\n **kwargs : dict\n Additional keywords arguments passed to matplotlib `plot` function.\n\n Returns\n -------\n display : :class:`~sklearn.metrics.DetCurveDisplay`\n Object that stores computed values.\n\n See Also\n --------\n det_curve : Compute error rates for different probability thresholds.\n DetCurveDisplay.from_predictions : Plot DET curve given the true and\n predicted labels.\n plot_roc_curve : Plot Receiver operating characteristic (ROC) curve.\n\n Examples\n --------\n >>> import matplotlib.pyplot as plt\n >>> from sklearn.datasets import make_classification\n >>> from sklearn.metrics import DetCurveDisplay\n >>> from sklearn.model_selection import train_test_split\n >>> from sklearn.svm import SVC\n >>> X, y = make_classification(n_samples=1000, random_state=0)\n >>> X_train, X_test, y_train, y_test = train_test_split(\n ... X, y, test_size=0.4, random_state=0)\n >>> clf = SVC(random_state=0).fit(X_train, y_train)\n >>> DetCurveDisplay.from_estimator(\n ... clf, X_test, y_test)\n <...>\n >>> plt.show()\n " }, @@ -162719,7 +162036,7 @@ "qname": "sklearn.metrics._plot.det_curve.DetCurveDisplay.from_predictions.cls", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -162732,7 +162049,7 @@ "qname": "sklearn.metrics._plot.det_curve.DetCurveDisplay.from_predictions.y_true", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "True labels." @@ -162748,7 +162065,7 @@ "qname": "sklearn.metrics._plot.det_curve.DetCurveDisplay.from_predictions.y_pred", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "Target scores, can either be probability estimates of the positive\nclass, confidence values, or non-thresholded measure of decisions\n(as returned by `decision_function` on some classifiers)." 
@@ -162764,7 +162081,7 @@ "qname": "sklearn.metrics._plot.det_curve.DetCurveDisplay.from_predictions.sample_weight", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,), default=None", "description": "Sample weights." @@ -162780,7 +162097,7 @@ "qname": "sklearn.metrics._plot.det_curve.DetCurveDisplay.from_predictions.pos_label", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str or int, default=None", "description": "The label of the positive class. When `pos_label=None`, if `y_true`\nis in {-1, 1} or {0, 1}, `pos_label` is set to 1, otherwise an\nerror will be raised." @@ -162805,7 +162122,7 @@ "qname": "sklearn.metrics._plot.det_curve.DetCurveDisplay.from_predictions.name", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str, default=None", "description": "Name of DET curve for labeling. If `None`, name will be set to\n`\"Classifier\"`." @@ -162821,7 +162138,7 @@ "qname": "sklearn.metrics._plot.det_curve.DetCurveDisplay.from_predictions.ax", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "matplotlib axes, default=None", "description": "Axes object to plot on. If `None`, a new figure and axes is\ncreated." @@ -162833,7 +162150,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Plot DET curve given the true and\npredicted labels.\n\nRead more in the :ref:`User Guide `.\n\n.. versionadded:: 1.0", "docstring": "Plot DET curve given the true and\n predicted labels.\n\n Read more in the :ref:`User Guide `.\n\n .. versionadded:: 1.0\n\n Parameters\n ----------\n y_true : array-like of shape (n_samples,)\n True labels.\n\n y_pred : array-like of shape (n_samples,)\n Target scores, can either be probability estimates of the positive\n class, confidence values, or non-thresholded measure of decisions\n (as returned by `decision_function` on some classifiers).\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights.\n\n pos_label : str or int, default=None\n The label of the positive class. When `pos_label=None`, if `y_true`\n is in {-1, 1} or {0, 1}, `pos_label` is set to 1, otherwise an\n error will be raised.\n\n name : str, default=None\n Name of DET curve for labeling. If `None`, name will be set to\n `\"Classifier\"`.\n\n ax : matplotlib axes, default=None\n Axes object to plot on. If `None`, a new figure and axes is\n created.\n\n **kwargs : dict\n Additional keywords arguments passed to matplotlib `plot` function.\n\n Returns\n -------\n display : :class:`~sklearn.metrics.DetCurveDisplay`\n Object that stores computed values.\n\n See Also\n --------\n det_curve : Compute error rates for different probability thresholds.\n DetCurveDisplay.from_estimator : Plot DET curve given an estimator and\n some data.\n plot_roc_curve : Plot Receiver operating characteristic (ROC) curve.\n\n Examples\n --------\n >>> import matplotlib.pyplot as plt\n >>> from sklearn.datasets import make_classification\n >>> from sklearn.metrics import DetCurveDisplay\n >>> from sklearn.model_selection import train_test_split\n >>> from sklearn.svm import SVC\n >>> X, y = make_classification(n_samples=1000, random_state=0)\n >>> X_train, X_test, y_train, y_test = train_test_split(\n ... 
X, y, test_size=0.4, random_state=0)\n >>> clf = SVC(random_state=0).fit(X_train, y_train)\n >>> y_pred = clf.decision_function(X_test)\n >>> DetCurveDisplay.from_predictions(\n ... y_test, y_pred)\n <...>\n >>> plt.show()\n " }, @@ -162849,7 +162166,7 @@ "qname": "sklearn.metrics._plot.det_curve.DetCurveDisplay.plot.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -162862,7 +162179,7 @@ "qname": "sklearn.metrics._plot.det_curve.DetCurveDisplay.plot.ax", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "matplotlib axes, default=None", "description": "Axes object to plot on. If `None`, a new figure and axes is\ncreated." @@ -162878,7 +162195,7 @@ "qname": "sklearn.metrics._plot.det_curve.DetCurveDisplay.plot.name", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str, default=None", "description": "Name of DET curve for labeling. If `None`, use `estimator_name` if\nit is not `None`, otherwise no labeling is shown." @@ -162890,7 +162207,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Plot visualization.", "docstring": "Plot visualization.\n\n Parameters\n ----------\n ax : matplotlib axes, default=None\n Axes object to plot on. If `None`, a new figure and axes is\n created.\n\n name : str, default=None\n Name of DET curve for labeling. If `None`, use `estimator_name` if\n it is not `None`, otherwise no labeling is shown.\n\n **kwargs : dict\n Additional keywords arguments passed to matplotlib `plot` function.\n\n Returns\n -------\n display : :class:`~sklearn.metrics.plot.DetCurveDisplay`\n Object that stores computed values.\n " }, @@ -162988,7 +162305,7 @@ }, "type": { "kind": "EnumType", - "values": ["decision_function", "auto", "predict_proba"] + "values": ["auto", "predict_proba", "decision_function"] } }, { @@ -163066,7 +162383,7 @@ "qname": "sklearn.metrics._plot.precision_recall_curve.PrecisionRecallDisplay.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -163079,7 +162396,7 @@ "qname": "sklearn.metrics._plot.precision_recall_curve.PrecisionRecallDisplay.__init__.precision", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray", "description": "Precision values." @@ -163095,7 +162412,7 @@ "qname": "sklearn.metrics._plot.precision_recall_curve.PrecisionRecallDisplay.__init__.recall", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray", "description": "Recall values." @@ -163111,7 +162428,7 @@ "qname": "sklearn.metrics._plot.precision_recall_curve.PrecisionRecallDisplay.__init__.average_precision", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=None", "description": "Average precision. If None, the average precision is not shown." @@ -163127,7 +162444,7 @@ "qname": "sklearn.metrics._plot.precision_recall_curve.PrecisionRecallDisplay.__init__.estimator_name", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str, default=None", "description": "Name of estimator. 
If None, then the estimator name is not shown." @@ -163143,7 +162460,7 @@ "qname": "sklearn.metrics._plot.precision_recall_curve.PrecisionRecallDisplay.__init__.pos_label", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str or int, default=None", "description": "The class considered as the positive class. If None, the class will not\nbe shown in the legend.\n\n.. versionadded:: 0.24" @@ -163164,7 +162481,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -163180,7 +162497,7 @@ "qname": "sklearn.metrics._plot.precision_recall_curve.PrecisionRecallDisplay.from_estimator.cls", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -163193,7 +162510,7 @@ "qname": "sklearn.metrics._plot.precision_recall_curve.PrecisionRecallDisplay.from_estimator.estimator", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "estimator instance", "description": "Fitted classifier or a fitted :class:`~sklearn.pipeline.Pipeline`\nin which the last estimator is a classifier." @@ -163209,7 +162526,7 @@ "qname": "sklearn.metrics._plot.precision_recall_curve.PrecisionRecallDisplay.from_estimator.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Input values." @@ -163234,7 +162551,7 @@ "qname": "sklearn.metrics._plot.precision_recall_curve.PrecisionRecallDisplay.from_estimator.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "Target values." @@ -163250,7 +162567,7 @@ "qname": "sklearn.metrics._plot.precision_recall_curve.PrecisionRecallDisplay.from_estimator.sample_weight", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,), default=None", "description": "Sample weights." @@ -163266,7 +162583,7 @@ "qname": "sklearn.metrics._plot.precision_recall_curve.PrecisionRecallDisplay.from_estimator.pos_label", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str or int, default=None", "description": "The class considered as the positive class when computing the\nprecision and recall metrics. By default, `estimators.classes_[1]`\nis considered as the positive class." @@ -163291,14 +162608,14 @@ "qname": "sklearn.metrics._plot.precision_recall_curve.PrecisionRecallDisplay.from_estimator.response_method", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'predict_proba', 'decision_function', 'auto'}, default='auto'", "description": "Specifies whether to use :term:`predict_proba` or\n:term:`decision_function` as the target response. If set to 'auto',\n:term:`predict_proba` is tried first and if it does not exist\n:term:`decision_function` is tried next." 
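For the `DetCurveDisplay` annotations above, a comparable sketch (same assumptions: scikit-learn >= 1.0 plus matplotlib) overlays one DET curve built from an estimator and one built from precomputed scores:

import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.metrics import DetCurveDisplay
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC

X, y = make_classification(n_samples=1000, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=0)
clf = SVC(random_state=0).fit(X_train, y_train)

ax = plt.gca()
# response_method='auto' tries predict_proba first, then decision_function.
DetCurveDisplay.from_estimator(clf, X_test, y_test, ax=ax, name="SVC (estimator)")
DetCurveDisplay.from_predictions(y_test, clf.decision_function(X_test), ax=ax, name="SVC (scores)")
plt.show()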
}, "type": { "kind": "EnumType", - "values": ["decision_function", "auto", "predict_proba"] + "values": ["auto", "predict_proba", "decision_function"] } }, { @@ -163307,7 +162624,7 @@ "qname": "sklearn.metrics._plot.precision_recall_curve.PrecisionRecallDisplay.from_estimator.name", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str, default=None", "description": "Name for labeling curve. If `None`, no name is used." @@ -163323,7 +162640,7 @@ "qname": "sklearn.metrics._plot.precision_recall_curve.PrecisionRecallDisplay.from_estimator.ax", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "matplotlib axes, default=None", "description": "Axes object to plot on. If `None`, a new figure and axes is created." @@ -163335,7 +162652,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Plot precision-recall curve given an estimator and some data.", "docstring": "Plot precision-recall curve given an estimator and some data.\n\n Parameters\n ----------\n estimator : estimator instance\n Fitted classifier or a fitted :class:`~sklearn.pipeline.Pipeline`\n in which the last estimator is a classifier.\n\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Input values.\n\n y : array-like of shape (n_samples,)\n Target values.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights.\n\n pos_label : str or int, default=None\n The class considered as the positive class when computing the\n precision and recall metrics. By default, `estimators.classes_[1]`\n is considered as the positive class.\n\n response_method : {'predict_proba', 'decision_function', 'auto'}, default='auto'\n Specifies whether to use :term:`predict_proba` or\n :term:`decision_function` as the target response. If set to 'auto',\n :term:`predict_proba` is tried first and if it does not exist\n :term:`decision_function` is tried next.\n\n name : str, default=None\n Name for labeling curve. If `None`, no name is used.\n\n ax : matplotlib axes, default=None\n Axes object to plot on. If `None`, a new figure and axes is created.\n\n **kwargs : dict\n Keyword arguments to be passed to matplotlib's `plot`.\n\n Returns\n -------\n display : :class:`~sklearn.metrics.PrecisionRecallDisplay`\n\n See Also\n --------\n PrecisionRecallDisplay.from_predictions : Plot precision-recall curve\n using estimated probabilities or output of decision function.\n\n Notes\n -----\n The average precision (cf. :func:`~sklearn.metrics.average_precision`)\n in scikit-learn is computed without any interpolation. To be consistent\n with this metric, the precision-recall curve is plotted without any\n interpolation as well (step-wise style).\n\n You can change this style by passing the keyword argument\n `drawstyle=\"default\"`. However, the curve will not be strictly\n consistent with the reported average precision.\n\n Examples\n --------\n >>> import matplotlib.pyplot as plt\n >>> from sklearn.datasets import make_classification\n >>> from sklearn.metrics import PrecisionRecallDisplay\n >>> from sklearn.model_selection import train_test_split\n >>> from sklearn.linear_model import LogisticRegression\n >>> X, y = make_classification(random_state=0)\n >>> X_train, X_test, y_train, y_test = train_test_split(\n ... 
X, y, random_state=0)\n >>> clf = LogisticRegression()\n >>> clf.fit(X_train, y_train)\n LogisticRegression()\n >>> PrecisionRecallDisplay.from_estimator(\n ... clf, X_test, y_test)\n <...>\n >>> plt.show()\n " }, @@ -163351,7 +162668,7 @@ "qname": "sklearn.metrics._plot.precision_recall_curve.PrecisionRecallDisplay.from_predictions.cls", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -163364,7 +162681,7 @@ "qname": "sklearn.metrics._plot.precision_recall_curve.PrecisionRecallDisplay.from_predictions.y_true", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "True binary labels." @@ -163380,7 +162697,7 @@ "qname": "sklearn.metrics._plot.precision_recall_curve.PrecisionRecallDisplay.from_predictions.y_pred", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "Estimated probabilities or output of decision function." @@ -163396,7 +162713,7 @@ "qname": "sklearn.metrics._plot.precision_recall_curve.PrecisionRecallDisplay.from_predictions.sample_weight", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,), default=None", "description": "Sample weights." @@ -163412,7 +162729,7 @@ "qname": "sklearn.metrics._plot.precision_recall_curve.PrecisionRecallDisplay.from_predictions.pos_label", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str or int, default=None", "description": "The class considered as the positive class when computing the\nprecision and recall metrics." @@ -163437,7 +162754,7 @@ "qname": "sklearn.metrics._plot.precision_recall_curve.PrecisionRecallDisplay.from_predictions.name", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str, default=None", "description": "Name for labeling curve. If `None`, name will be set to\n`\"Classifier\"`." @@ -163453,7 +162770,7 @@ "qname": "sklearn.metrics._plot.precision_recall_curve.PrecisionRecallDisplay.from_predictions.ax", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "matplotlib axes, default=None", "description": "Axes object to plot on. If `None`, a new figure and axes is created." @@ -163465,7 +162782,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Plot precision-recall curve given binary class predictions.", "docstring": "Plot precision-recall curve given binary class predictions.\n\n Parameters\n ----------\n y_true : array-like of shape (n_samples,)\n True binary labels.\n\n y_pred : array-like of shape (n_samples,)\n Estimated probabilities or output of decision function.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights.\n\n pos_label : str or int, default=None\n The class considered as the positive class when computing the\n precision and recall metrics.\n\n name : str, default=None\n Name for labeling curve. If `None`, name will be set to\n `\"Classifier\"`.\n\n ax : matplotlib axes, default=None\n Axes object to plot on. 
If `None`, a new figure and axes is created.\n\n **kwargs : dict\n Keyword arguments to be passed to matplotlib's `plot`.\n\n Returns\n -------\n display : :class:`~sklearn.metrics.PrecisionRecallDisplay`\n\n See Also\n --------\n PrecisionRecallDisplay.from_estimator : Plot precision-recall curve\n using an estimator.\n\n Notes\n -----\n The average precision (cf. :func:`~sklearn.metrics.average_precision`)\n in scikit-learn is computed without any interpolation. To be consistent\n with this metric, the precision-recall curve is plotted without any\n interpolation as well (step-wise style).\n\n You can change this style by passing the keyword argument\n `drawstyle=\"default\"`. However, the curve will not be strictly\n consistent with the reported average precision.\n\n Examples\n --------\n >>> import matplotlib.pyplot as plt\n >>> from sklearn.datasets import make_classification\n >>> from sklearn.metrics import PrecisionRecallDisplay\n >>> from sklearn.model_selection import train_test_split\n >>> from sklearn.linear_model import LogisticRegression\n >>> X, y = make_classification(random_state=0)\n >>> X_train, X_test, y_train, y_test = train_test_split(\n ... X, y, random_state=0)\n >>> clf = LogisticRegression()\n >>> clf.fit(X_train, y_train)\n LogisticRegression()\n >>> y_pred = clf.predict_proba(X_test)[:, 1]\n >>> PrecisionRecallDisplay.from_predictions(\n ... y_test, y_pred)\n <...>\n >>> plt.show()\n " }, @@ -163481,7 +162798,7 @@ "qname": "sklearn.metrics._plot.precision_recall_curve.PrecisionRecallDisplay.plot.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -163494,7 +162811,7 @@ "qname": "sklearn.metrics._plot.precision_recall_curve.PrecisionRecallDisplay.plot.ax", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Matplotlib Axes, default=None", "description": "Axes object to plot on. If `None`, a new figure and axes is\ncreated." @@ -163510,7 +162827,7 @@ "qname": "sklearn.metrics._plot.precision_recall_curve.PrecisionRecallDisplay.plot.name", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str, default=None", "description": "Name of precision recall curve for labeling. If `None`, use\n`estimator_name` if not `None`, otherwise no labeling is shown." @@ -163522,7 +162839,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Plot visualization.\n\nExtra keyword arguments will be passed to matplotlib's `plot`.", "docstring": "Plot visualization.\n\n Extra keyword arguments will be passed to matplotlib's `plot`.\n\n Parameters\n ----------\n ax : Matplotlib Axes, default=None\n Axes object to plot on. If `None`, a new figure and axes is\n created.\n\n name : str, default=None\n Name of precision recall curve for labeling. If `None`, use\n `estimator_name` if not `None`, otherwise no labeling is shown.\n\n **kwargs : dict\n Keyword arguments to be passed to matplotlib's `plot`.\n\n Returns\n -------\n display : :class:`~sklearn.metrics.PrecisionRecallDisplay`\n Object that stores computed values.\n\n Notes\n -----\n The average precision (cf. :func:`~sklearn.metrics.average_precision`)\n in scikit-learn is computed without any interpolation. 
To be consistent\n with this metric, the precision-recall curve is plotted without any\n interpolation as well (step-wise style).\n\n You can change this style by passing the keyword argument\n `drawstyle=\"default\"`. However, the curve will not be strictly\n consistent with the reported average precision.\n " }, @@ -163620,7 +162937,7 @@ }, "type": { "kind": "EnumType", - "values": ["decision_function", "auto", "predict_proba"] + "values": ["auto", "predict_proba", "decision_function"] } }, { @@ -163698,7 +163015,7 @@ "qname": "sklearn.metrics._plot.roc_curve.RocCurveDisplay.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -163711,7 +163028,7 @@ "qname": "sklearn.metrics._plot.roc_curve.RocCurveDisplay.__init__.fpr", "default_value": null, "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray", "description": "False positive rate." @@ -163727,7 +163044,7 @@ "qname": "sklearn.metrics._plot.roc_curve.RocCurveDisplay.__init__.tpr", "default_value": null, "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray", "description": "True positive rate." @@ -163743,7 +163060,7 @@ "qname": "sklearn.metrics._plot.roc_curve.RocCurveDisplay.__init__.roc_auc", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=None", "description": "Area under ROC curve. If None, the roc_auc score is not shown." @@ -163759,7 +163076,7 @@ "qname": "sklearn.metrics._plot.roc_curve.RocCurveDisplay.__init__.estimator_name", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str, default=None", "description": "Name of estimator. If None, the estimator name is not shown." @@ -163775,7 +163092,7 @@ "qname": "sklearn.metrics._plot.roc_curve.RocCurveDisplay.__init__.pos_label", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str or int, default=None", "description": "The class considered as the positive class when computing the roc auc\nmetrics. By default, `estimators.classes_[1]` is considered\nas the positive class.\n\n.. versionadded:: 0.24" @@ -163796,7 +163113,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -163812,7 +163129,7 @@ "qname": "sklearn.metrics._plot.roc_curve.RocCurveDisplay.from_estimator.cls", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -163825,7 +163142,7 @@ "qname": "sklearn.metrics._plot.roc_curve.RocCurveDisplay.from_estimator.estimator", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "estimator instance", "description": "Fitted classifier or a fitted :class:`~sklearn.pipeline.Pipeline`\nin which the last estimator is a classifier." @@ -163841,7 +163158,7 @@ "qname": "sklearn.metrics._plot.roc_curve.RocCurveDisplay.from_estimator.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Input values." 
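The `PrecisionRecallDisplay` annotations above note that the curve is drawn step-wise so it stays consistent with the reported average precision. A short sketch under the same assumptions (scikit-learn >= 1.0, matplotlib):

import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import PrecisionRecallDisplay
from sklearn.model_selection import train_test_split

X, y = make_classification(random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression().fit(X_train, y_train)

# from_predictions expects scores for the positive class, e.g. predict_proba[:, 1].
y_score = clf.predict_proba(X_test)[:, 1]
PrecisionRecallDisplay.from_predictions(y_test, y_score, name="LogisticRegression")
plt.show()

Passing `drawstyle="default"` through the plotting kwargs would interpolate the curve, at the cost of no longer matching the reported average precision, as the notes above explain.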
@@ -163866,7 +163183,7 @@ "qname": "sklearn.metrics._plot.roc_curve.RocCurveDisplay.from_estimator.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "Target values." @@ -163882,7 +163199,7 @@ "qname": "sklearn.metrics._plot.roc_curve.RocCurveDisplay.from_estimator.sample_weight", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,), default=None", "description": "Sample weights." @@ -163898,7 +163215,7 @@ "qname": "sklearn.metrics._plot.roc_curve.RocCurveDisplay.from_estimator.drop_intermediate", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether to drop some suboptimal thresholds which would not appear\non a plotted ROC curve. This is useful in order to create lighter\nROC curves." @@ -163914,14 +163231,14 @@ "qname": "sklearn.metrics._plot.roc_curve.RocCurveDisplay.from_estimator.response_method", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'predict_proba', 'decision_function', 'auto'} default='auto'", "description": "Specifies whether to use :term:`predict_proba` or\n:term:`decision_function` as the target response. If set to 'auto',\n:term:`predict_proba` is tried first and if it does not exist\n:term:`decision_function` is tried next." }, "type": { "kind": "EnumType", - "values": ["decision_function", "auto", "predict_proba"] + "values": ["auto", "predict_proba", "decision_function"] } }, { @@ -163930,7 +163247,7 @@ "qname": "sklearn.metrics._plot.roc_curve.RocCurveDisplay.from_estimator.pos_label", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str or int, default=None", "description": "The class considered as the positive class when computing the roc auc\nmetrics. By default, `estimators.classes_[1]` is considered\nas the positive class." @@ -163955,7 +163272,7 @@ "qname": "sklearn.metrics._plot.roc_curve.RocCurveDisplay.from_estimator.name", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str, default=None", "description": "Name of ROC Curve for labeling. If `None`, use the name of the\nestimator." @@ -163971,7 +163288,7 @@ "qname": "sklearn.metrics._plot.roc_curve.RocCurveDisplay.from_estimator.ax", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "matplotlib axes, default=None", "description": "Axes object to plot on. If `None`, a new figure and axes is created." 
@@ -163983,7 +163300,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Create a ROC Curve display from an estimator.", "docstring": "Create a ROC Curve display from an estimator.\n\n Parameters\n ----------\n estimator : estimator instance\n Fitted classifier or a fitted :class:`~sklearn.pipeline.Pipeline`\n in which the last estimator is a classifier.\n\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Input values.\n\n y : array-like of shape (n_samples,)\n Target values.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights.\n\n drop_intermediate : bool, default=True\n Whether to drop some suboptimal thresholds which would not appear\n on a plotted ROC curve. This is useful in order to create lighter\n ROC curves.\n\n response_method : {'predict_proba', 'decision_function', 'auto'} default='auto'\n Specifies whether to use :term:`predict_proba` or\n :term:`decision_function` as the target response. If set to 'auto',\n :term:`predict_proba` is tried first and if it does not exist\n :term:`decision_function` is tried next.\n\n pos_label : str or int, default=None\n The class considered as the positive class when computing the roc auc\n metrics. By default, `estimators.classes_[1]` is considered\n as the positive class.\n\n name : str, default=None\n Name of ROC Curve for labeling. If `None`, use the name of the\n estimator.\n\n ax : matplotlib axes, default=None\n Axes object to plot on. If `None`, a new figure and axes is created.\n\n **kwargs : dict\n Keyword arguments to be passed to matplotlib's `plot`.\n\n Returns\n -------\n display : :class:`~sklearn.metrics.plot.RocCurveDisplay`\n The ROC Curve display.\n\n See Also\n --------\n roc_curve : Compute Receiver operating characteristic (ROC) curve.\n RocCurveDisplay.from_predictions : ROC Curve visualization given the\n probabilities of scores of a classifier.\n roc_auc_score : Compute the area under the ROC curve.\n\n Examples\n --------\n >>> import matplotlib.pyplot as plt\n >>> from sklearn.datasets import make_classification\n >>> from sklearn.metrics import RocCurveDisplay\n >>> from sklearn.model_selection import train_test_split\n >>> from sklearn.svm import SVC\n >>> X, y = make_classification(random_state=0)\n >>> X_train, X_test, y_train, y_test = train_test_split(\n ... X, y, random_state=0)\n >>> clf = SVC(random_state=0).fit(X_train, y_train)\n >>> RocCurveDisplay.from_estimator(\n ... clf, X_test, y_test)\n <...>\n >>> plt.show()\n " }, @@ -163999,7 +163316,7 @@ "qname": "sklearn.metrics._plot.roc_curve.RocCurveDisplay.from_predictions.cls", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -164012,7 +163329,7 @@ "qname": "sklearn.metrics._plot.roc_curve.RocCurveDisplay.from_predictions.y_true", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "True labels." 
@@ -164028,7 +163345,7 @@ "qname": "sklearn.metrics._plot.roc_curve.RocCurveDisplay.from_predictions.y_pred", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "Target scores, can either be probability estimates of the positive\nclass, confidence values, or non-thresholded measure of decisions\n(as returned by \u201cdecision_function\u201d on some classifiers)." @@ -164044,7 +163361,7 @@ "qname": "sklearn.metrics._plot.roc_curve.RocCurveDisplay.from_predictions.sample_weight", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,), default=None", "description": "Sample weights." @@ -164060,7 +163377,7 @@ "qname": "sklearn.metrics._plot.roc_curve.RocCurveDisplay.from_predictions.drop_intermediate", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether to drop some suboptimal thresholds which would not appear\non a plotted ROC curve. This is useful in order to create lighter\nROC curves." @@ -164076,7 +163393,7 @@ "qname": "sklearn.metrics._plot.roc_curve.RocCurveDisplay.from_predictions.pos_label", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str or int, default=None", "description": "The label of the positive class. When `pos_label=None`, if `y_true`\nis in {-1, 1} or {0, 1}, `pos_label` is set to 1, otherwise an\nerror will be raised." @@ -164101,7 +163418,7 @@ "qname": "sklearn.metrics._plot.roc_curve.RocCurveDisplay.from_predictions.name", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str, default=None", "description": "Name of ROC curve for labeling. If `None`, name will be set to\n`\"Classifier\"`." @@ -164117,7 +163434,7 @@ "qname": "sklearn.metrics._plot.roc_curve.RocCurveDisplay.from_predictions.ax", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "matplotlib axes, default=None", "description": "Axes object to plot on. If `None`, a new figure and axes is\ncreated." @@ -164129,7 +163446,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Plot ROC curve given the true and predicted values.\n\nRead more in the :ref:`User Guide `.\n\n.. versionadded:: 1.0", "docstring": "Plot ROC curve given the true and predicted values.\n\n Read more in the :ref:`User Guide `.\n\n .. versionadded:: 1.0\n\n Parameters\n ----------\n y_true : array-like of shape (n_samples,)\n True labels.\n\n y_pred : array-like of shape (n_samples,)\n Target scores, can either be probability estimates of the positive\n class, confidence values, or non-thresholded measure of decisions\n (as returned by \u201cdecision_function\u201d on some classifiers).\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights.\n\n drop_intermediate : bool, default=True\n Whether to drop some suboptimal thresholds which would not appear\n on a plotted ROC curve. This is useful in order to create lighter\n ROC curves.\n\n pos_label : str or int, default=None\n The label of the positive class. 
When `pos_label=None`, if `y_true`\n is in {-1, 1} or {0, 1}, `pos_label` is set to 1, otherwise an\n error will be raised.\n\n name : str, default=None\n Name of ROC curve for labeling. If `None`, name will be set to\n `\"Classifier\"`.\n\n ax : matplotlib axes, default=None\n Axes object to plot on. If `None`, a new figure and axes is\n created.\n\n **kwargs : dict\n Additional keywords arguments passed to matplotlib `plot` function.\n\n Returns\n -------\n display : :class:`~sklearn.metrics.RocCurveDisplay`\n Object that stores computed values.\n\n See Also\n --------\n roc_curve : Compute Receiver operating characteristic (ROC) curve.\n RocCurveDisplay.from_estimator : ROC Curve visualization given an\n estimator and some data.\n roc_auc_score : Compute the area under the ROC curve.\n\n Examples\n --------\n >>> import matplotlib.pyplot as plt\n >>> from sklearn.datasets import make_classification\n >>> from sklearn.metrics import RocCurveDisplay\n >>> from sklearn.model_selection import train_test_split\n >>> from sklearn.svm import SVC\n >>> X, y = make_classification(random_state=0)\n >>> X_train, X_test, y_train, y_test = train_test_split(\n ... X, y, random_state=0)\n >>> clf = SVC(random_state=0).fit(X_train, y_train)\n >>> y_pred = clf.decision_function(X_test)\n >>> RocCurveDisplay.from_predictions(\n ... y_test, y_pred)\n <...>\n >>> plt.show()\n " }, @@ -164145,7 +163462,7 @@ "qname": "sklearn.metrics._plot.roc_curve.RocCurveDisplay.plot.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -164158,7 +163475,7 @@ "qname": "sklearn.metrics._plot.roc_curve.RocCurveDisplay.plot.ax", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "matplotlib axes, default=None", "description": "Axes object to plot on. If `None`, a new figure and axes is\ncreated." @@ -164174,7 +163491,7 @@ "qname": "sklearn.metrics._plot.roc_curve.RocCurveDisplay.plot.name", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str, default=None", "description": "Name of ROC Curve for labeling. If `None`, use `estimator_name` if\nnot `None`, otherwise no labeling is shown." @@ -164186,7 +163503,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Plot visualization\n\nExtra keyword arguments will be passed to matplotlib's ``plot``.", "docstring": "Plot visualization\n\n Extra keyword arguments will be passed to matplotlib's ``plot``.\n\n Parameters\n ----------\n ax : matplotlib axes, default=None\n Axes object to plot on. If `None`, a new figure and axes is\n created.\n\n name : str, default=None\n Name of ROC Curve for labeling. If `None`, use `estimator_name` if\n not `None`, otherwise no labeling is shown.\n\n Returns\n -------\n display : :class:`~sklearn.metrics.plot.RocCurveDisplay`\n Object that stores computed values.\n " }, @@ -164300,7 +163617,7 @@ }, "type": { "kind": "EnumType", - "values": ["decision_function", "auto", "predict_proba"] + "values": ["auto", "predict_proba", "decision_function"] } }, { @@ -164909,7 +164226,7 @@ "is_public": true, "docstring": { "type": "ndarray of shape (n,)", - "description": "X coordinates. These must be either monotonic increasing or monotonic\ndecreasing." + "description": "x coordinates. These must be either monotonic increasing or monotonic\ndecreasing." 
}, "type": { "kind": "NamedType", @@ -164925,7 +164242,7 @@ "is_public": true, "docstring": { "type": "ndarray of shape, (n,)", - "description": "Y coordinates." + "description": "y coordinates." }, "type": { "kind": "UnionType", @@ -164945,7 +164262,7 @@ "results": [], "is_public": true, "description": "Compute Area Under the Curve (AUC) using the trapezoidal rule.\n\nThis is a general function, given points on a curve. For computing the\narea under the ROC-curve, see :func:`roc_auc_score`. For an alternative\nway to summarize a precision-recall curve, see\n:func:`average_precision_score`.", - "docstring": "Compute Area Under the Curve (AUC) using the trapezoidal rule.\n\n This is a general function, given points on a curve. For computing the\n area under the ROC-curve, see :func:`roc_auc_score`. For an alternative\n way to summarize a precision-recall curve, see\n :func:`average_precision_score`.\n\n Parameters\n ----------\n x : ndarray of shape (n,)\n X coordinates. These must be either monotonic increasing or monotonic\n decreasing.\n y : ndarray of shape, (n,)\n Y coordinates.\n\n Returns\n -------\n auc : float\n Area Under the Curve.\n\n See Also\n --------\n roc_auc_score : Compute the area under the ROC curve.\n average_precision_score : Compute average precision from prediction scores.\n precision_recall_curve : Compute precision-recall pairs for different\n probability thresholds.\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn import metrics\n >>> y = np.array([1, 1, 2, 2])\n >>> pred = np.array([0.1, 0.4, 0.35, 0.8])\n >>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2)\n >>> metrics.auc(fpr, tpr)\n 0.75\n " + "docstring": "Compute Area Under the Curve (AUC) using the trapezoidal rule.\n\n This is a general function, given points on a curve. For computing the\n area under the ROC-curve, see :func:`roc_auc_score`. For an alternative\n way to summarize a precision-recall curve, see\n :func:`average_precision_score`.\n\n Parameters\n ----------\n x : ndarray of shape (n,)\n x coordinates. 
These must be either monotonic increasing or monotonic\n decreasing.\n y : ndarray of shape, (n,)\n y coordinates.\n\n Returns\n -------\n auc : float\n\n See Also\n --------\n roc_auc_score : Compute the area under the ROC curve.\n average_precision_score : Compute average precision from prediction scores.\n precision_recall_curve : Compute precision-recall pairs for different\n probability thresholds.\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn import metrics\n >>> y = np.array([1, 1, 2, 2])\n >>> pred = np.array([0.1, 0.4, 0.35, 0.8])\n >>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2)\n >>> metrics.auc(fpr, tpr)\n 0.75\n " }, { "id": "sklearn/sklearn.metrics._ranking/average_precision_score", @@ -165001,7 +164318,7 @@ "types": [ { "kind": "EnumType", - "values": ["macro", "samples", "weighted", "micro"] + "values": ["macro", "micro", "samples", "weighted"] }, { "kind": "NamedType", @@ -165679,7 +164996,7 @@ "types": [ { "kind": "EnumType", - "values": ["macro", "samples", "weighted", "micro"] + "values": ["macro", "micro", "samples", "weighted"] }, { "kind": "NamedType", @@ -165733,7 +165050,7 @@ }, "type": { "kind": "EnumType", - "values": ["ovr", "raise", "ovo"] + "values": ["raise", "ovr", "ovo"] } }, { @@ -166565,7 +165882,7 @@ "types": [ { "kind": "EnumType", - "values": ["uniform_average", "variance_weighted", "raw_values"] + "values": ["variance_weighted", "uniform_average", "raw_values"] }, { "kind": "NamedType", @@ -167461,7 +166778,7 @@ "types": [ { "kind": "EnumType", - "values": ["uniform_average", "variance_weighted", "raw_values"] + "values": ["variance_weighted", "uniform_average", "raw_values"] }, { "kind": "NamedType", @@ -168624,82 +167941,6 @@ "description": "Make a scorer from a performance metric or loss function.\n\nThis factory function wraps scoring functions for use in\n:class:`~sklearn.model_selection.GridSearchCV` and\n:func:`~sklearn.model_selection.cross_val_score`.\nIt takes a score function, such as :func:`~sklearn.metrics.accuracy_score`,\n:func:`~sklearn.metrics.mean_squared_error`,\n:func:`~sklearn.metrics.adjusted_rand_score` or\n:func:`~sklearn.metrics.average_precision_score`\nand returns a callable that scores an estimator's output.\nThe signature of the call is `(estimator, X, y)` where `estimator`\nis the model to be evaluated, `X` is the data and `y` is the\nground truth labeling (or `None` in the case of unsupervised models).\n\nRead more in the :ref:`User Guide `.", "docstring": "Make a scorer from a performance metric or loss function.\n\n This factory function wraps scoring functions for use in\n :class:`~sklearn.model_selection.GridSearchCV` and\n :func:`~sklearn.model_selection.cross_val_score`.\n It takes a score function, such as :func:`~sklearn.metrics.accuracy_score`,\n :func:`~sklearn.metrics.mean_squared_error`,\n :func:`~sklearn.metrics.adjusted_rand_score` or\n :func:`~sklearn.metrics.average_precision_score`\n and returns a callable that scores an estimator's output.\n The signature of the call is `(estimator, X, y)` where `estimator`\n is the model to be evaluated, `X` is the data and `y` is the\n ground truth labeling (or `None` in the case of unsupervised models).\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n score_func : callable\n Score function (or loss function) with signature\n `score_func(y, y_pred, **kwargs)`.\n\n greater_is_better : bool, default=True\n Whether `score_func` is a score function (default), meaning high is\n good, or a loss function, meaning 
low is good. In the latter case, the\n scorer object will sign-flip the outcome of the `score_func`.\n\n needs_proba : bool, default=False\n Whether `score_func` requires `predict_proba` to get probability\n estimates out of a classifier.\n\n If True, for binary `y_true`, the score function is supposed to accept\n a 1D `y_pred` (i.e., probability of the positive class, shape\n `(n_samples,)`).\n\n needs_threshold : bool, default=False\n Whether `score_func` takes a continuous decision certainty.\n This only works for binary classification using estimators that\n have either a `decision_function` or `predict_proba` method.\n\n If True, for binary `y_true`, the score function is supposed to accept\n a 1D `y_pred` (i.e., probability of the positive class or the decision\n function, shape `(n_samples,)`).\n\n For example `average_precision` or the area under the roc curve\n can not be computed using discrete predictions alone.\n\n **kwargs : additional arguments\n Additional parameters to be passed to `score_func`.\n\n Returns\n -------\n scorer : callable\n Callable object that returns a scalar score; greater is better.\n\n Notes\n -----\n If `needs_proba=False` and `needs_threshold=False`, the score\n function is supposed to accept the output of :term:`predict`. If\n `needs_proba=True`, the score function is supposed to accept the\n output of :term:`predict_proba` (For binary `y_true`, the score function is\n supposed to accept probability of the positive class). If\n `needs_threshold=True`, the score function is supposed to accept the\n output of :term:`decision_function` or :term:`predict_proba` when\n :term:`decision_function` is not present.\n\n Examples\n --------\n >>> from sklearn.metrics import fbeta_score, make_scorer\n >>> ftwo_scorer = make_scorer(fbeta_score, beta=2)\n >>> ftwo_scorer\n make_scorer(fbeta_score, beta=2)\n >>> from sklearn.model_selection import GridSearchCV\n >>> from sklearn.svm import LinearSVC\n >>> grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]},\n ... 
scoring=ftwo_scorer)\n " }, - { - "id": "sklearn/sklearn.metrics._scorer/negative_likelihood_ratio", - "name": "negative_likelihood_ratio", - "qname": "sklearn.metrics._scorer.negative_likelihood_ratio", - "decorators": [], - "parameters": [ - { - "id": "sklearn/sklearn.metrics._scorer/negative_likelihood_ratio/y_true", - "name": "y_true", - "qname": "sklearn.metrics._scorer.negative_likelihood_ratio.y_true", - "default_value": null, - "assigned_by": "POSITION_OR_NAME", - "is_public": false, - "docstring": { - "type": "", - "description": "" - }, - "type": {} - }, - { - "id": "sklearn/sklearn.metrics._scorer/negative_likelihood_ratio/y_pred", - "name": "y_pred", - "qname": "sklearn.metrics._scorer.negative_likelihood_ratio.y_pred", - "default_value": null, - "assigned_by": "POSITION_OR_NAME", - "is_public": false, - "docstring": { - "type": "", - "description": "" - }, - "type": {} - } - ], - "results": [], - "is_public": false, - "description": "", - "docstring": null - }, - { - "id": "sklearn/sklearn.metrics._scorer/positive_likelihood_ratio", - "name": "positive_likelihood_ratio", - "qname": "sklearn.metrics._scorer.positive_likelihood_ratio", - "decorators": [], - "parameters": [ - { - "id": "sklearn/sklearn.metrics._scorer/positive_likelihood_ratio/y_true", - "name": "y_true", - "qname": "sklearn.metrics._scorer.positive_likelihood_ratio.y_true", - "default_value": null, - "assigned_by": "POSITION_OR_NAME", - "is_public": false, - "docstring": { - "type": "", - "description": "" - }, - "type": {} - }, - { - "id": "sklearn/sklearn.metrics._scorer/positive_likelihood_ratio/y_pred", - "name": "y_pred", - "qname": "sklearn.metrics._scorer.positive_likelihood_ratio.y_pred", - "default_value": null, - "assigned_by": "POSITION_OR_NAME", - "is_public": false, - "docstring": { - "type": "", - "description": "" - }, - "type": {} - } - ], - "results": [], - "is_public": false, - "description": "", - "docstring": null - }, { "id": "sklearn/sklearn.metrics.cluster._bicluster/_check_rows_and_columns", "name": "_check_rows_and_columns", @@ -172101,8 +171342,8 @@ ], "results": [], "is_public": true, - "description": "Compute the distance matrix from a vector array X and optional Y.\n\nThis method takes either a vector array or a distance matrix, and returns\na distance matrix. If the input is a vector array, the distances are\ncomputed. If the input is a distances matrix, it is returned instead.\n\nThis method provides a safe way to take a distance matrix as input, while\npreserving compatibility with many other algorithms that take a vector\narray.\n\nIf Y is given (default is None), then the returned matrix is the pairwise\ndistance between the arrays from both X and Y.\n\nValid values for metric are:\n\n- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',\n 'manhattan']. These metrics support sparse matrix\n inputs.\n ['nan_euclidean'] but it does not yet support sparse matrices.\n\n- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',\n 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',\n 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',\n 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']\n See the documentation for scipy.spatial.distance for details on these\n metrics. 
These metrics do not support sparse matrix inputs.\n\nNote that in the case of 'cityblock', 'cosine' and 'euclidean' (which are\nvalid scipy.spatial.distance metrics), the scikit-learn implementation\nwill be used, which is faster and has support for sparse matrices (except\nfor 'cityblock'). For a verbose description of the metrics from\nscikit-learn, see :func:`sklearn.metrics.pairwise.distance_metrics`\nfunction.\n\nRead more in the :ref:`User Guide `.", - "docstring": "Compute the distance matrix from a vector array X and optional Y.\n\n This method takes either a vector array or a distance matrix, and returns\n a distance matrix. If the input is a vector array, the distances are\n computed. If the input is a distances matrix, it is returned instead.\n\n This method provides a safe way to take a distance matrix as input, while\n preserving compatibility with many other algorithms that take a vector\n array.\n\n If Y is given (default is None), then the returned matrix is the pairwise\n distance between the arrays from both X and Y.\n\n Valid values for metric are:\n\n - From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',\n 'manhattan']. These metrics support sparse matrix\n inputs.\n ['nan_euclidean'] but it does not yet support sparse matrices.\n\n - From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',\n 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',\n 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',\n 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']\n See the documentation for scipy.spatial.distance for details on these\n metrics. These metrics do not support sparse matrix inputs.\n\n Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are\n valid scipy.spatial.distance metrics), the scikit-learn implementation\n will be used, which is faster and has support for sparse matrices (except\n for 'cityblock'). For a verbose description of the metrics from\n scikit-learn, see :func:`sklearn.metrics.pairwise.distance_metrics`\n function.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n X : ndarray of shape (n_samples_X, n_samples_X) or (n_samples_X, n_features)\n Array of pairwise distances between samples, or a feature array.\n The shape of the array should be (n_samples_X, n_samples_X) if\n metric == \"precomputed\" and (n_samples_X, n_features) otherwise.\n\n Y : ndarray of shape (n_samples_Y, n_features), default=None\n An optional second feature array. Only allowed if\n metric != \"precomputed\".\n\n metric : str or callable, default='euclidean'\n The metric to use when calculating distance between instances in a\n feature array. If metric is a string, it must be one of the options\n allowed by scipy.spatial.distance.pdist for its metric parameter, or\n a metric listed in ``pairwise.PAIRWISE_DISTANCE_FUNCTIONS``.\n If metric is \"precomputed\", X is assumed to be a distance matrix.\n Alternatively, if metric is a callable function, it is called on each\n pair of instances (rows) and the resulting value recorded. The callable\n should take two arrays from X as input and return a value indicating\n the distance between them.\n\n n_jobs : int, default=None\n The number of jobs to use for the computation. This works by breaking\n down the pairwise matrix into n_jobs even slices and computing them in\n parallel.\n\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. 
See :term:`Glossary `\n for more details.\n\n force_all_finite : bool or 'allow-nan', default=True\n Whether to raise an error on np.inf, np.nan, pd.NA in array. Ignored\n for a metric listed in ``pairwise.PAIRWISE_DISTANCE_FUNCTIONS``. The\n possibilities are:\n\n - True: Force all values of array to be finite.\n - False: accepts np.inf, np.nan, pd.NA in array.\n - 'allow-nan': accepts only np.nan and pd.NA values in array. Values\n cannot be infinite.\n\n .. versionadded:: 0.22\n ``force_all_finite`` accepts the string ``'allow-nan'``.\n\n .. versionchanged:: 0.23\n Accepts `pd.NA` and converts it into `np.nan`.\n\n **kwds : optional keyword parameters\n Any further parameters are passed directly to the distance function.\n If using a scipy.spatial.distance metric, the parameters are still\n metric dependent. See the scipy docs for usage examples.\n\n Returns\n -------\n D : ndarray of shape (n_samples_X, n_samples_X) or (n_samples_X, n_samples_Y)\n A distance matrix D such that D_{i, j} is the distance between the\n ith and jth vectors of the given matrix X, if Y is None.\n If Y is not None, then D_{i, j} is the distance between the ith array\n from X and the jth array from Y.\n\n See Also\n --------\n pairwise_distances_chunked : Performs the same calculation as this\n function, but returns a generator of chunks of the distance matrix, in\n order to limit memory usage.\n paired_distances : Computes the distances between corresponding elements\n of two arrays.\n " + "description": "Compute the distance matrix from a vector array X and optional Y.\n\nThis method takes either a vector array or a distance matrix, and returns\na distance matrix. If the input is a vector array, the distances are\ncomputed. If the input is a distances matrix, it is returned instead.\n\nThis method provides a safe way to take a distance matrix as input, while\npreserving compatibility with many other algorithms that take a vector\narray.\n\nIf Y is given (default is None), then the returned matrix is the pairwise\ndistance between the arrays from both X and Y.\n\nValid values for metric are:\n\n- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',\n 'manhattan']. These metrics support sparse matrix\n inputs.\n ['nan_euclidean'] but it does not yet support sparse matrices.\n\n- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',\n 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',\n 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',\n 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']\n See the documentation for scipy.spatial.distance for details on these\n metrics. These metrics do not support sparse matrix inputs.\n\nNote that in the case of 'cityblock', 'cosine' and 'euclidean' (which are\nvalid scipy.spatial.distance metrics), the scikit-learn implementation\nwill be used, which is faster and has support for sparse matrices (except\nfor 'cityblock'). For a verbose description of the metrics from\nscikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics\nfunction.\n\nRead more in the :ref:`User Guide `.", + "docstring": "Compute the distance matrix from a vector array X and optional Y.\n\n This method takes either a vector array or a distance matrix, and returns\n a distance matrix. If the input is a vector array, the distances are\n computed. 
If the input is a distances matrix, it is returned instead.\n\n This method provides a safe way to take a distance matrix as input, while\n preserving compatibility with many other algorithms that take a vector\n array.\n\n If Y is given (default is None), then the returned matrix is the pairwise\n distance between the arrays from both X and Y.\n\n Valid values for metric are:\n\n - From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',\n 'manhattan']. These metrics support sparse matrix\n inputs.\n ['nan_euclidean'] but it does not yet support sparse matrices.\n\n - From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',\n 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',\n 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',\n 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']\n See the documentation for scipy.spatial.distance for details on these\n metrics. These metrics do not support sparse matrix inputs.\n\n Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are\n valid scipy.spatial.distance metrics), the scikit-learn implementation\n will be used, which is faster and has support for sparse matrices (except\n for 'cityblock'). For a verbose description of the metrics from\n scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics\n function.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n X : ndarray of shape (n_samples_X, n_samples_X) or (n_samples_X, n_features)\n Array of pairwise distances between samples, or a feature array.\n The shape of the array should be (n_samples_X, n_samples_X) if\n metric == \"precomputed\" and (n_samples_X, n_features) otherwise.\n\n Y : ndarray of shape (n_samples_Y, n_features), default=None\n An optional second feature array. Only allowed if\n metric != \"precomputed\".\n\n metric : str or callable, default='euclidean'\n The metric to use when calculating distance between instances in a\n feature array. If metric is a string, it must be one of the options\n allowed by scipy.spatial.distance.pdist for its metric parameter, or\n a metric listed in ``pairwise.PAIRWISE_DISTANCE_FUNCTIONS``.\n If metric is \"precomputed\", X is assumed to be a distance matrix.\n Alternatively, if metric is a callable function, it is called on each\n pair of instances (rows) and the resulting value recorded. The callable\n should take two arrays from X as input and return a value indicating\n the distance between them.\n\n n_jobs : int, default=None\n The number of jobs to use for the computation. This works by breaking\n down the pairwise matrix into n_jobs even slices and computing them in\n parallel.\n\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See :term:`Glossary `\n for more details.\n\n force_all_finite : bool or 'allow-nan', default=True\n Whether to raise an error on np.inf, np.nan, pd.NA in array. Ignored\n for a metric listed in ``pairwise.PAIRWISE_DISTANCE_FUNCTIONS``. The\n possibilities are:\n\n - True: Force all values of array to be finite.\n - False: accepts np.inf, np.nan, pd.NA in array.\n - 'allow-nan': accepts only np.nan and pd.NA values in array. Values\n cannot be infinite.\n\n .. versionadded:: 0.22\n ``force_all_finite`` accepts the string ``'allow-nan'``.\n\n .. 
versionchanged:: 0.23\n Accepts `pd.NA` and converts it into `np.nan`.\n\n **kwds : optional keyword parameters\n Any further parameters are passed directly to the distance function.\n If using a scipy.spatial.distance metric, the parameters are still\n metric dependent. See the scipy docs for usage examples.\n\n Returns\n -------\n D : ndarray of shape (n_samples_X, n_samples_X) or (n_samples_X, n_samples_Y)\n A distance matrix D such that D_{i, j} is the distance between the\n ith and jth vectors of the given matrix X, if Y is None.\n If Y is not None, then D_{i, j} is the distance between the ith array\n from X and the jth array from Y.\n\n See Also\n --------\n pairwise_distances_chunked : Performs the same calculation as this\n function, but returns a generator of chunks of the distance matrix, in\n order to limit memory usage.\n paired_distances : Computes the distances between corresponding elements\n of two arrays.\n " }, { "id": "sklearn/sklearn.metrics.pairwise/pairwise_distances_argmin", @@ -174002,7 +173243,7 @@ "qname": "sklearn.mixture._bayesian_mixture.BayesianGaussianMixture.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -174015,7 +173256,7 @@ "qname": "sklearn.mixture._bayesian_mixture.BayesianGaussianMixture.__init__.n_components", "default_value": "1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=1", "description": "The number of mixture components. Depending on the data and the value\nof the `weight_concentration_prior` the model can decide to not use\nall the components by setting some component `weights_` to values very\nclose to zero. The number of effective components is therefore smaller\nthan n_components." @@ -174031,14 +173272,14 @@ "qname": "sklearn.mixture._bayesian_mixture.BayesianGaussianMixture.__init__.covariance_type", "default_value": "'full'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'full', 'tied', 'diag', 'spherical'}, default='full'", "description": "String describing the type of covariance parameters to use.\nMust be one of::\n\n 'full' (each component has its own general covariance matrix),\n 'tied' (all components share the same general covariance matrix),\n 'diag' (each component has its own diagonal covariance matrix),\n 'spherical' (each component has its own single variance)." }, "type": { "kind": "EnumType", - "values": ["tied", "full", "diag", "spherical"] + "values": ["tied", "spherical", "full", "diag"] } }, { @@ -174047,7 +173288,7 @@ "qname": "sklearn.mixture._bayesian_mixture.BayesianGaussianMixture.__init__.tol", "default_value": "0.001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-3", "description": "The convergence threshold. EM iterations will stop when the\nlower bound average gain on the likelihood (of the training data with\nrespect to the model) is below this threshold." @@ -174063,7 +173304,7 @@ "qname": "sklearn.mixture._bayesian_mixture.BayesianGaussianMixture.__init__.reg_covar", "default_value": "1e-06", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-6", "description": "Non-negative regularization added to the diagonal of covariance.\nAllows to assure that the covariance matrices are all positive." 
@@ -174079,7 +173320,7 @@ "qname": "sklearn.mixture._bayesian_mixture.BayesianGaussianMixture.__init__.max_iter", "default_value": "100", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=100", "description": "The number of EM iterations to perform." @@ -174095,7 +173336,7 @@ "qname": "sklearn.mixture._bayesian_mixture.BayesianGaussianMixture.__init__.n_init", "default_value": "1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=1", "description": "The number of initializations to perform. The result with the highest\nlower bound value on the likelihood is kept." @@ -174111,14 +173352,14 @@ "qname": "sklearn.mixture._bayesian_mixture.BayesianGaussianMixture.__init__.init_params", "default_value": "'kmeans'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'kmeans', 'k-means++', 'random', 'random_from_data'}, default='kmeans'", "description": "The method used to initialize the weights, the means and the\ncovariances.\nString must be one of:\n\n 'kmeans' : responsibilities are initialized using kmeans.\n 'k-means++' : use the k-means++ method to initialize.\n 'random' : responsibilities are initialized randomly.\n 'random_from_data' : initial means are randomly selected data points.\n\n.. versionchanged:: v1.1\n `init_params` now accepts 'random_from_data' and 'k-means++' as\n initialization methods." }, "type": { "kind": "EnumType", - "values": ["kmeans", "k-means++", "random_from_data", "random"] + "values": ["k-means++", "random", "random_from_data", "kmeans"] } }, { @@ -174127,7 +173368,7 @@ "qname": "sklearn.mixture._bayesian_mixture.BayesianGaussianMixture.__init__.weight_concentration_prior_type", "default_value": "'dirichlet_process'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str, default='dirichlet_process'", "description": "String describing the type of the weight concentration prior.\nMust be one of::\n\n 'dirichlet_process' (using the Stick-breaking representation),\n 'dirichlet_distribution' (can favor more uniform weights)." @@ -174143,7 +173384,7 @@ "qname": "sklearn.mixture._bayesian_mixture.BayesianGaussianMixture.__init__.weight_concentration_prior", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float or None, default=None", "description": "The dirichlet concentration of each component on the weight\ndistribution (Dirichlet). This is commonly called gamma in the\nliterature. The higher concentration puts more mass in\nthe center and will lead to more components being active, while a lower\nconcentration parameter will lead to more mass at the edge of the\nmixture weights simplex. The value of the parameter must be greater\nthan 0. If it is None, it's set to ``1. / n_components``." @@ -174168,7 +173409,7 @@ "qname": "sklearn.mixture._bayesian_mixture.BayesianGaussianMixture.__init__.mean_precision_prior", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float or None, default=None", "description": "The precision prior on the mean distribution (Gaussian).\nControls the extent of where means can be placed. Larger\nvalues concentrate the cluster means around `mean_prior`.\nThe value of the parameter must be greater than 0.\nIf it is None, it is set to 1." 
@@ -174193,7 +173434,7 @@ "qname": "sklearn.mixture._bayesian_mixture.BayesianGaussianMixture.__init__.mean_prior", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like, shape (n_features,), default=None", "description": "The prior on the mean distribution (Gaussian).\nIf it is None, it is set to the mean of X." @@ -174218,7 +173459,7 @@ "qname": "sklearn.mixture._bayesian_mixture.BayesianGaussianMixture.__init__.degrees_of_freedom_prior", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float or None, default=None", "description": "The prior of the number of degrees of freedom on the covariance\ndistributions (Wishart). If it is None, it's set to `n_features`." @@ -174243,7 +173484,7 @@ "qname": "sklearn.mixture._bayesian_mixture.BayesianGaussianMixture.__init__.covariance_prior", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float or array-like, default=None", "description": "The prior on the covariance distribution (Wishart).\nIf it is None, the emiprical covariance prior is initialized using the\ncovariance of X. The shape depends on `covariance_type`::\n\n (n_features, n_features) if 'full',\n (n_features, n_features) if 'tied',\n (n_features) if 'diag',\n float if 'spherical'" @@ -174268,7 +173509,7 @@ "qname": "sklearn.mixture._bayesian_mixture.BayesianGaussianMixture.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Controls the random seed given to the method chosen to initialize the\nparameters (see `init_params`).\nIn addition, it controls the generation of random samples from the\nfitted distribution (see the method `sample`).\nPass an int for reproducible output across multiple function calls.\nSee :term:`Glossary `." @@ -174297,7 +173538,7 @@ "qname": "sklearn.mixture._bayesian_mixture.BayesianGaussianMixture.__init__.warm_start", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "If 'warm_start' is True, the solution of the last fitting is used as\ninitialization for the next call of fit(). This can speed up\nconvergence when fit is called several times on similar problems.\nSee :term:`the Glossary `." @@ -174313,7 +173554,7 @@ "qname": "sklearn.mixture._bayesian_mixture.BayesianGaussianMixture.__init__.verbose", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=0", "description": "Enable verbose output. If 1 then it prints the current\ninitialization and each iteration step. If greater than 1 then\nit prints also the log probability and the time needed\nfor each step." @@ -174329,7 +173570,7 @@ "qname": "sklearn.mixture._bayesian_mixture.BayesianGaussianMixture.__init__.verbose_interval", "default_value": "10", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=10", "description": "Number of iteration done before the next print." 
@@ -174341,7 +173582,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -175400,7 +174641,7 @@ "qname": "sklearn.mixture._gaussian_mixture.GaussianMixture.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -175413,7 +174654,7 @@ "qname": "sklearn.mixture._gaussian_mixture.GaussianMixture.__init__.n_components", "default_value": "1", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=1", "description": "The number of mixture components." @@ -175429,14 +174670,14 @@ "qname": "sklearn.mixture._gaussian_mixture.GaussianMixture.__init__.covariance_type", "default_value": "'full'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'full', 'tied', 'diag', 'spherical'}, default='full'", "description": "String describing the type of covariance parameters to use.\nMust be one of:\n\n- 'full': each component has its own general covariance matrix.\n- 'tied': all components share the same general covariance matrix.\n- 'diag': each component has its own diagonal covariance matrix.\n- 'spherical': each component has its own single variance." }, "type": { "kind": "EnumType", - "values": ["tied", "full", "diag", "spherical"] + "values": ["tied", "spherical", "full", "diag"] } }, { @@ -175445,7 +174686,7 @@ "qname": "sklearn.mixture._gaussian_mixture.GaussianMixture.__init__.tol", "default_value": "0.001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-3", "description": "The convergence threshold. EM iterations will stop when the\nlower bound average gain is below this threshold." @@ -175461,7 +174702,7 @@ "qname": "sklearn.mixture._gaussian_mixture.GaussianMixture.__init__.reg_covar", "default_value": "1e-06", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-6", "description": "Non-negative regularization added to the diagonal of covariance.\nAllows to assure that the covariance matrices are all positive." @@ -175477,7 +174718,7 @@ "qname": "sklearn.mixture._gaussian_mixture.GaussianMixture.__init__.max_iter", "default_value": "100", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=100", "description": "The number of EM iterations to perform." @@ -175493,7 +174734,7 @@ "qname": "sklearn.mixture._gaussian_mixture.GaussianMixture.__init__.n_init", "default_value": "1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=1", "description": "The number of initializations to perform. The best results are kept." @@ -175509,14 +174750,14 @@ "qname": "sklearn.mixture._gaussian_mixture.GaussianMixture.__init__.init_params", "default_value": "'kmeans'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'kmeans', 'k-means++', 'random', 'random_from_data'}, default='kmeans'", "description": "The method used to initialize the weights, the means and the\nprecisions.\nString must be one of:\n\n- 'kmeans' : responsibilities are initialized using kmeans.\n- 'k-means++' : use the k-means++ method to initialize.\n- 'random' : responsibilities are initialized randomly.\n- 'random_from_data' : initial means are randomly selected data points.\n\n.. 
versionchanged:: v1.1\n `init_params` now accepts 'random_from_data' and 'k-means++' as\n initialization methods." }, "type": { "kind": "EnumType", - "values": ["kmeans", "k-means++", "random_from_data", "random"] + "values": ["k-means++", "random", "random_from_data", "kmeans"] } }, { @@ -175525,7 +174766,7 @@ "qname": "sklearn.mixture._gaussian_mixture.GaussianMixture.__init__.weights_init", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_components, ), default=None", "description": "The user-provided initial weights.\nIf it is None, weights are initialized using the `init_params` method." @@ -175541,7 +174782,7 @@ "qname": "sklearn.mixture._gaussian_mixture.GaussianMixture.__init__.means_init", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_components, n_features), default=None", "description": "The user-provided initial means,\nIf it is None, means are initialized using the `init_params` method." @@ -175557,7 +174798,7 @@ "qname": "sklearn.mixture._gaussian_mixture.GaussianMixture.__init__.precisions_init", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like, default=None", "description": "The user-provided initial precisions (inverse of the covariance\nmatrices).\nIf it is None, precisions are initialized using the 'init_params'\nmethod.\nThe shape depends on 'covariance_type'::\n\n (n_components,) if 'spherical',\n (n_features, n_features) if 'tied',\n (n_components, n_features) if 'diag',\n (n_components, n_features, n_features) if 'full'" @@ -175573,7 +174814,7 @@ "qname": "sklearn.mixture._gaussian_mixture.GaussianMixture.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Controls the random seed given to the method chosen to initialize the\nparameters (see `init_params`).\nIn addition, it controls the generation of random samples from the\nfitted distribution (see the method `sample`).\nPass an int for reproducible output across multiple function calls.\nSee :term:`Glossary `." @@ -175602,7 +174843,7 @@ "qname": "sklearn.mixture._gaussian_mixture.GaussianMixture.__init__.warm_start", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "If 'warm_start' is True, the solution of the last fitting is used as\ninitialization for the next call of fit(). This can speed up\nconvergence when fit is called several times on similar problems.\nIn that case, 'n_init' is ignored and only a single initialization\noccurs upon the first call.\nSee :term:`the Glossary `." @@ -175618,7 +174859,7 @@ "qname": "sklearn.mixture._gaussian_mixture.GaussianMixture.__init__.verbose", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=0", "description": "Enable verbose output. If 1 then it prints the current\ninitialization and each iteration step. If greater than 1 then\nit prints also the log probability and the time needed\nfor each step." 
@@ -175634,7 +174875,7 @@ "qname": "sklearn.mixture._gaussian_mixture.GaussianMixture.__init__.verbose_interval", "default_value": "10", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=10", "description": "Number of iteration done before the next print." @@ -175646,7 +174887,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -176016,7 +175257,7 @@ "qname": "sklearn.mixture._gaussian_mixture.GaussianMixture.aic.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -176029,7 +175270,7 @@ "qname": "sklearn.mixture._gaussian_mixture.GaussianMixture.aic.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array of shape (n_samples, n_dimensions)", "description": "The input samples." @@ -176041,7 +175282,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Akaike information criterion for the current model on the input X.\n\nYou can refer to this :ref:`mathematical section ` for more\ndetails regarding the formulation of the AIC used.", "docstring": "Akaike information criterion for the current model on the input X.\n\n You can refer to this :ref:`mathematical section ` for more\n details regarding the formulation of the AIC used.\n\n Parameters\n ----------\n X : array of shape (n_samples, n_dimensions)\n The input samples.\n\n Returns\n -------\n aic : float\n The lower the better.\n " }, @@ -176057,7 +175298,7 @@ "qname": "sklearn.mixture._gaussian_mixture.GaussianMixture.bic.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -176070,7 +175311,7 @@ "qname": "sklearn.mixture._gaussian_mixture.GaussianMixture.bic.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array of shape (n_samples, n_dimensions)", "description": "The input samples." 
@@ -176082,7 +175323,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Bayesian information criterion for the current model on the input X.\n\nYou can refer to this :ref:`mathematical section ` for more\ndetails regarding the formulation of the BIC used.", "docstring": "Bayesian information criterion for the current model on the input X.\n\n You can refer to this :ref:`mathematical section ` for more\n details regarding the formulation of the BIC used.\n\n Parameters\n ----------\n X : array of shape (n_samples, n_dimensions)\n The input samples.\n\n Returns\n -------\n bic : float\n The lower the better.\n " }, @@ -176415,7 +175656,7 @@ }, "type": { "kind": "EnumType", - "values": ["tied", "full", "diag", "spherical"] + "values": ["tied", "spherical", "full", "diag"] } }, { @@ -176475,7 +175716,7 @@ }, "type": { "kind": "EnumType", - "values": ["tied", "full", "diag", "spherical"] + "values": ["tied", "spherical", "full", "diag"] } } ], @@ -176913,7 +176154,7 @@ }, "type": { "kind": "EnumType", - "values": ["tied", "full", "diag", "spherical"] + "values": ["tied", "spherical", "full", "diag"] } } ], @@ -176989,7 +176230,7 @@ }, "type": { "kind": "EnumType", - "values": ["tied", "full", "diag", "spherical"] + "values": ["tied", "spherical", "full", "diag"] } } ], @@ -177930,7 +177171,7 @@ "qname": "sklearn.model_selection._search.GridSearchCV.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -177943,7 +177184,7 @@ "qname": "sklearn.model_selection._search.GridSearchCV.__init__.estimator", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "estimator object", "description": "This is assumed to implement the scikit-learn estimator interface.\nEither estimator needs to provide a ``score`` function,\nor ``scoring`` must be passed." @@ -177959,7 +177200,7 @@ "qname": "sklearn.model_selection._search.GridSearchCV.__init__.param_grid", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "dict or list of dictionaries", "description": "Dictionary with parameters names (`str`) as keys and lists of\nparameter settings to try as values, or a list of such\ndictionaries, in which case the grids spanned by each dictionary\nin the list are explored. This enables searching over any sequence\nof parameter settings." @@ -177984,7 +177225,7 @@ "qname": "sklearn.model_selection._search.GridSearchCV.__init__.scoring", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str, callable, list, tuple or dict, default=None", "description": "Strategy to evaluate the performance of the cross-validated model on\nthe test set.\n\nIf `scoring` represents a single score, one can use:\n\n- a single string (see :ref:`scoring_parameter`);\n- a callable (see :ref:`scoring`) that returns a single value.\n\nIf `scoring` represents multiple scores, one can use:\n\n- a list or tuple of unique strings;\n- a callable returning a dictionary where the keys are the metric\n names and the values are the metric scores;\n- a dictionary with metric names as keys and callables a values.\n\nSee :ref:`multimetric_grid_search` for an example." 
@@ -178021,7 +177262,7 @@ "qname": "sklearn.model_selection._search.GridSearchCV.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Number of jobs to run in parallel.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details.\n\n.. versionchanged:: v0.20\n `n_jobs` default changed from 1 to None" @@ -178037,7 +177278,7 @@ "qname": "sklearn.model_selection._search.GridSearchCV.__init__.refit", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, str, or callable, default=True", "description": "Refit an estimator using the best found parameters on the whole\ndataset.\n\nFor multiple metric evaluation, this needs to be a `str` denoting the\nscorer that would be used to find the best parameters for refitting\nthe estimator at the end.\n\nWhere there are considerations other than maximum score in\nchoosing a best estimator, ``refit`` can be set to a function which\nreturns the selected ``best_index_`` given ``cv_results_``. In that\ncase, the ``best_estimator_`` and ``best_params_`` will be set\naccording to the returned ``best_index_`` while the ``best_score_``\nattribute will not be available.\n\nThe refitted estimator is made available at the ``best_estimator_``\nattribute and permits using ``predict`` directly on this\n``GridSearchCV`` instance.\n\nAlso for multiple metric evaluation, the attributes ``best_index_``,\n``best_score_`` and ``best_params_`` will only be available if\n``refit`` is set and all of them will be determined w.r.t this specific\nscorer.\n\nSee ``scoring`` parameter to know more about multiple metric\nevaluation.\n\n.. versionchanged:: 0.20\n Support for callable added." @@ -178066,7 +177307,7 @@ "qname": "sklearn.model_selection._search.GridSearchCV.__init__.cv", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, cross-validation generator or an iterable, default=None", "description": "Determines the cross-validation splitting strategy.\nPossible inputs for cv are:\n\n- None, to use the default 5-fold cross validation,\n- integer, to specify the number of folds in a `(Stratified)KFold`,\n- :term:`CV splitter`,\n- An iterable yielding (train, test) splits as arrays of indices.\n\nFor integer/None inputs, if the estimator is a classifier and ``y`` is\neither binary or multiclass, :class:`StratifiedKFold` is used. In all\nother cases, :class:`KFold` is used. These splitters are instantiated\nwith `shuffle=False` so the splits will be the same across calls.\n\nRefer :ref:`User Guide ` for the various\ncross-validation strategies that can be used here.\n\n.. versionchanged:: 0.22\n ``cv`` default value if None changed from 3-fold to 5-fold." @@ -178095,7 +177336,7 @@ "qname": "sklearn.model_selection._search.GridSearchCV.__init__.verbose", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int", "description": "Controls the verbosity: the higher, the more messages.\n\n- >1 : the computation time for each fold and parameter candidate is\n displayed;\n- >2 : the score is also displayed;\n- >3 : the fold and candidate parameter indexes are also displayed\n together with the starting time of the computation." 
@@ -178111,7 +177352,7 @@ "qname": "sklearn.model_selection._search.GridSearchCV.__init__.pre_dispatch", "default_value": "'2*n_jobs'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, or str, default='2*n_jobs'", "description": "Controls the number of jobs that get dispatched during parallel\nexecution. Reducing this number can be useful to avoid an\nexplosion of memory consumption when more jobs get dispatched\nthan CPUs can process. This parameter can be:\n\n - None, in which case all the jobs are immediately\n created and spawned. Use this for lightweight and\n fast-running jobs, to avoid delays due to on-demand\n spawning of the jobs\n\n - An int, giving the exact number of total jobs that are\n spawned\n\n - A str, giving an expression as a function of n_jobs,\n as in '2*n_jobs'" @@ -178136,7 +177377,7 @@ "qname": "sklearn.model_selection._search.GridSearchCV.__init__.error_score", "default_value": "np.nan", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "'raise' or numeric, default=np.nan", "description": "Value to assign to the score if an error occurs in estimator fitting.\nIf set to 'raise', the error is raised. If a numeric value is given,\nFitFailedWarning is raised. This parameter does not affect the refit\nstep, which will always raise the error." @@ -178161,7 +177402,7 @@ "qname": "sklearn.model_selection._search.GridSearchCV.__init__.return_train_score", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "If ``False``, the ``cv_results_`` attribute will not include training\nscores.\nComputing training scores is used to get insights on how different\nparameter settings impact the overfitting/underfitting trade-off.\nHowever computing the scores on the training set can be computationally\nexpensive and is not strictly required to select the parameters that\nyield the best generalization performance.\n\n.. versionadded:: 0.19\n\n.. 
versionchanged:: 0.21\n Default value was changed from ``True`` to ``False``" @@ -178173,7 +177414,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -178227,7 +177468,7 @@ "qname": "sklearn.model_selection._search.ParameterGrid.__getitem__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -178240,7 +177481,7 @@ "qname": "sklearn.model_selection._search.ParameterGrid.__getitem__.ind", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int", "description": "The iteration index" @@ -178252,7 +177493,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Get the parameters that would be ``ind``th in iteration", "docstring": "Get the parameters that would be ``ind``th in iteration\n\n Parameters\n ----------\n ind : int\n The iteration index\n\n Returns\n -------\n params : dict of str to any\n Equal to list(self)[ind]\n " }, @@ -178268,7 +177509,7 @@ "qname": "sklearn.model_selection._search.ParameterGrid.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -178281,7 +177522,7 @@ "qname": "sklearn.model_selection._search.ParameterGrid.__init__.param_grid", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "dict of str to sequence, or sequence of such", "description": "The parameter grid to explore, as a dictionary mapping estimator\nparameters to sequences of allowed values.\n\nAn empty dict signifies default parameters.\n\nA sequence of dicts signifies a sequence of grids to search, and is\nuseful to avoid exploring parameter combinations that make no sense\nor have no effect. See the examples below." @@ -178302,7 +177543,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -178318,7 +177559,7 @@ "qname": "sklearn.model_selection._search.ParameterGrid.__iter__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -178327,7 +177568,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Iterate over the points in the grid.", "docstring": "Iterate over the points in the grid.\n\n Returns\n -------\n params : iterator over dict of str to any\n Yields dictionaries mapping each estimator parameter to one of its\n allowed values.\n " }, @@ -178343,7 +177584,7 @@ "qname": "sklearn.model_selection._search.ParameterGrid.__len__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -178352,7 +177593,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Number of points on the grid.", "docstring": "Number of points on the grid." 
}, @@ -178368,7 +177609,7 @@ "qname": "sklearn.model_selection._search.ParameterSampler.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -178381,7 +177622,7 @@ "qname": "sklearn.model_selection._search.ParameterSampler.__init__.param_distributions", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "dict", "description": "Dictionary with parameters names (`str`) as keys and distributions\nor lists of parameters to try. Distributions must provide a ``rvs``\nmethod for sampling (such as those from scipy.stats.distributions).\nIf a list is given, it is sampled uniformly.\nIf a list of dicts is given, first a dict is sampled uniformly, and\nthen a parameter is sampled using that dict as above." @@ -178397,7 +177638,7 @@ "qname": "sklearn.model_selection._search.ParameterSampler.__init__.n_iter", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int", "description": "Number of parameter settings that are produced." @@ -178413,7 +177654,7 @@ "qname": "sklearn.model_selection._search.ParameterSampler.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Pseudo random number generator state used for random uniform sampling\nfrom lists of possible values instead of scipy.stats distributions.\nPass an int for reproducible output across multiple\nfunction calls.\nSee :term:`Glossary `." @@ -178438,7 +177679,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -178454,7 +177695,7 @@ "qname": "sklearn.model_selection._search.ParameterSampler.__iter__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -178463,7 +177704,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -178479,7 +177720,7 @@ "qname": "sklearn.model_selection._search.ParameterSampler.__len__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -178488,7 +177729,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Number of points that will be sampled.", "docstring": "Number of points that will be sampled." }, @@ -178529,7 +177770,7 @@ "qname": "sklearn.model_selection._search.RandomizedSearchCV.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -178542,7 +177783,7 @@ "qname": "sklearn.model_selection._search.RandomizedSearchCV.__init__.estimator", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "estimator object", "description": "A object of that type is instantiated for each grid point.\nThis is assumed to implement the scikit-learn estimator interface.\nEither estimator needs to provide a ``score`` function,\nor ``scoring`` must be passed." 
@@ -178558,7 +177799,7 @@ "qname": "sklearn.model_selection._search.RandomizedSearchCV.__init__.param_distributions", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "dict or list of dicts", "description": "Dictionary with parameters names (`str`) as keys and distributions\nor lists of parameters to try. Distributions must provide a ``rvs``\nmethod for sampling (such as those from scipy.stats.distributions).\nIf a list is given, it is sampled uniformly.\nIf a list of dicts is given, first a dict is sampled uniformly, and\nthen a parameter is sampled using that dict as above." @@ -178583,7 +177824,7 @@ "qname": "sklearn.model_selection._search.RandomizedSearchCV.__init__.n_iter", "default_value": "10", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=10", "description": "Number of parameter settings that are sampled. n_iter trades\noff runtime vs quality of the solution." @@ -178599,7 +177840,7 @@ "qname": "sklearn.model_selection._search.RandomizedSearchCV.__init__.scoring", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str, callable, list, tuple or dict, default=None", "description": "Strategy to evaluate the performance of the cross-validated model on\nthe test set.\n\nIf `scoring` represents a single score, one can use:\n\n- a single string (see :ref:`scoring_parameter`);\n- a callable (see :ref:`scoring`) that returns a single value.\n\nIf `scoring` represents multiple scores, one can use:\n\n- a list or tuple of unique strings;\n- a callable returning a dictionary where the keys are the metric\n names and the values are the metric scores;\n- a dictionary with metric names as keys and callables a values.\n\nSee :ref:`multimetric_grid_search` for an example.\n\nIf None, the estimator's score method is used." @@ -178636,7 +177877,7 @@ "qname": "sklearn.model_selection._search.RandomizedSearchCV.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Number of jobs to run in parallel.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details.\n\n.. versionchanged:: v0.20\n `n_jobs` default changed from 1 to None" @@ -178652,7 +177893,7 @@ "qname": "sklearn.model_selection._search.RandomizedSearchCV.__init__.refit", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, str, or callable, default=True", "description": "Refit an estimator using the best found parameters on the whole\ndataset.\n\nFor multiple metric evaluation, this needs to be a `str` denoting the\nscorer that would be used to find the best parameters for refitting\nthe estimator at the end.\n\nWhere there are considerations other than maximum score in\nchoosing a best estimator, ``refit`` can be set to a function which\nreturns the selected ``best_index_`` given the ``cv_results``. 
In that\ncase, the ``best_estimator_`` and ``best_params_`` will be set\naccording to the returned ``best_index_`` while the ``best_score_``\nattribute will not be available.\n\nThe refitted estimator is made available at the ``best_estimator_``\nattribute and permits using ``predict`` directly on this\n``RandomizedSearchCV`` instance.\n\nAlso for multiple metric evaluation, the attributes ``best_index_``,\n``best_score_`` and ``best_params_`` will only be available if\n``refit`` is set and all of them will be determined w.r.t this specific\nscorer.\n\nSee ``scoring`` parameter to know more about multiple metric\nevaluation.\n\n.. versionchanged:: 0.20\n Support for callable added." @@ -178681,7 +177922,7 @@ "qname": "sklearn.model_selection._search.RandomizedSearchCV.__init__.cv", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, cross-validation generator or an iterable, default=None", "description": "Determines the cross-validation splitting strategy.\nPossible inputs for cv are:\n\n- None, to use the default 5-fold cross validation,\n- integer, to specify the number of folds in a `(Stratified)KFold`,\n- :term:`CV splitter`,\n- An iterable yielding (train, test) splits as arrays of indices.\n\nFor integer/None inputs, if the estimator is a classifier and ``y`` is\neither binary or multiclass, :class:`StratifiedKFold` is used. In all\nother cases, :class:`KFold` is used. These splitters are instantiated\nwith `shuffle=False` so the splits will be the same across calls.\n\nRefer :ref:`User Guide ` for the various\ncross-validation strategies that can be used here.\n\n.. versionchanged:: 0.22\n ``cv`` default value if None changed from 3-fold to 5-fold." @@ -178710,7 +177951,7 @@ "qname": "sklearn.model_selection._search.RandomizedSearchCV.__init__.verbose", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int", "description": "Controls the verbosity: the higher, the more messages." @@ -178726,7 +177967,7 @@ "qname": "sklearn.model_selection._search.RandomizedSearchCV.__init__.pre_dispatch", "default_value": "'2*n_jobs'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, or str, default='2*n_jobs'", "description": "Controls the number of jobs that get dispatched during parallel\nexecution. Reducing this number can be useful to avoid an\nexplosion of memory consumption when more jobs get dispatched\nthan CPUs can process. This parameter can be:\n\n - None, in which case all the jobs are immediately\n created and spawned. Use this for lightweight and\n fast-running jobs, to avoid delays due to on-demand\n spawning of the jobs\n\n - An int, giving the exact number of total jobs that are\n spawned\n\n - A str, giving an expression as a function of n_jobs,\n as in '2*n_jobs'" @@ -178751,7 +177992,7 @@ "qname": "sklearn.model_selection._search.RandomizedSearchCV.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Pseudo random number generator state used for random uniform sampling\nfrom lists of possible values instead of scipy.stats distributions.\nPass an int for reproducible output across multiple\nfunction calls.\nSee :term:`Glossary `." 
@@ -178780,7 +178021,7 @@ "qname": "sklearn.model_selection._search.RandomizedSearchCV.__init__.error_score", "default_value": "np.nan", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "'raise' or numeric, default=np.nan", "description": "Value to assign to the score if an error occurs in estimator fitting.\nIf set to 'raise', the error is raised. If a numeric value is given,\nFitFailedWarning is raised. This parameter does not affect the refit\nstep, which will always raise the error." @@ -178805,7 +178046,7 @@ "qname": "sklearn.model_selection._search.RandomizedSearchCV.__init__.return_train_score", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "If ``False``, the ``cv_results_`` attribute will not include training\nscores.\nComputing training scores is used to get insights on how different\nparameter settings impact the overfitting/underfitting trade-off.\nHowever computing the scores on the training set can be computationally\nexpensive and is not strictly required to select the parameters that\nyield the best generalization performance.\n\n.. versionadded:: 0.19\n\n.. versionchanged:: 0.21\n Default value was changed from ``True`` to ``False``" @@ -178817,7 +178058,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -179439,7 +178680,7 @@ "qname": "sklearn.model_selection._search_successive_halving.HalvingGridSearchCV.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -179452,7 +178693,7 @@ "qname": "sklearn.model_selection._search_successive_halving.HalvingGridSearchCV.__init__.estimator", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "estimator object", "description": "This is assumed to implement the scikit-learn estimator interface.\nEither estimator needs to provide a ``score`` function,\nor ``scoring`` must be passed." @@ -179468,7 +178709,7 @@ "qname": "sklearn.model_selection._search_successive_halving.HalvingGridSearchCV.__init__.param_grid", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "dict or list of dictionaries", "description": "Dictionary with parameters names (string) as keys and lists of\nparameter settings to try as values, or a list of such\ndictionaries, in which case the grids spanned by each dictionary\nin the list are explored. This enables searching over any sequence\nof parameter settings." @@ -179493,7 +178734,7 @@ "qname": "sklearn.model_selection._search_successive_halving.HalvingGridSearchCV.__init__.factor", "default_value": "3", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or float, default=3", "description": "The 'halving' parameter, which determines the proportion of candidates\nthat are selected for each subsequent iteration. For example,\n``factor=3`` means that only one third of the candidates are selected." 
@@ -179518,7 +178759,7 @@ "qname": "sklearn.model_selection._search_successive_halving.HalvingGridSearchCV.__init__.resource", "default_value": "'n_samples'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "``'n_samples'`` or str, default='n_samples'", "description": "Defines the resource that increases with each iteration. By default,\nthe resource is the number of samples. It can also be set to any\nparameter of the base estimator that accepts positive integer\nvalues, e.g. 'n_iterations' or 'n_estimators' for a gradient\nboosting estimator. In this case ``max_resources`` cannot be 'auto'\nand must be set explicitly." @@ -179543,7 +178784,7 @@ "qname": "sklearn.model_selection._search_successive_halving.HalvingGridSearchCV.__init__.max_resources", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default='auto'", "description": "The maximum amount of resource that any candidate is allowed to use\nfor a given iteration. By default, this is set to ``n_samples`` when\n``resource='n_samples'`` (default), else an error is raised." @@ -179559,7 +178800,7 @@ "qname": "sklearn.model_selection._search_successive_halving.HalvingGridSearchCV.__init__.min_resources", "default_value": "'exhaust'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'exhaust', 'smallest'} or int, default='exhaust'", "description": "The minimum amount of resource that any candidate is allowed to use\nfor a given iteration. Equivalently, this defines the amount of\nresources `r0` that are allocated for each candidate at the first\niteration.\n\n- 'smallest' is a heuristic that sets `r0` to a small value:\n\n - ``n_splits * 2`` when ``resource='n_samples'`` for a regression\n problem\n - ``n_classes * n_splits * 2`` when ``resource='n_samples'`` for a\n classification problem\n - ``1`` when ``resource != 'n_samples'``\n\n- 'exhaust' will set `r0` such that the **last** iteration uses as\n much resources as possible. Namely, the last iteration will use the\n highest value smaller than ``max_resources`` that is a multiple of\n both ``min_resources`` and ``factor``. In general, using 'exhaust'\n leads to a more accurate estimator, but is slightly more time\n consuming.\n\nNote that the amount of resources used at each iteration is always a\nmultiple of ``min_resources``." @@ -179569,7 +178810,7 @@ "types": [ { "kind": "EnumType", - "values": ["smallest", "exhaust"] + "values": ["exhaust", "smallest"] }, { "kind": "NamedType", @@ -179584,7 +178825,7 @@ "qname": "sklearn.model_selection._search_successive_halving.HalvingGridSearchCV.__init__.aggressive_elimination", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "This is only relevant in cases where there isn't enough resources to\nreduce the remaining candidates to at most `factor` after the last\niteration. If ``True``, then the search process will 'replay' the\nfirst iteration for as long as needed until the number of candidates\nis small enough. This is ``False`` by default, which means that the\nlast iteration may evaluate more than ``factor`` candidates. See\n:ref:`aggressive_elimination` for more details." 
@@ -179600,7 +178841,7 @@ "qname": "sklearn.model_selection._search_successive_halving.HalvingGridSearchCV.__init__.cv", "default_value": "5", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, cross-validation generator or iterable, default=5", "description": "Determines the cross-validation splitting strategy.\nPossible inputs for cv are:\n\n- integer, to specify the number of folds in a `(Stratified)KFold`,\n- :term:`CV splitter`,\n- An iterable yielding (train, test) splits as arrays of indices.\n\nFor integer/None inputs, if the estimator is a classifier and ``y`` is\neither binary or multiclass, :class:`StratifiedKFold` is used. In all\nother cases, :class:`KFold` is used. These splitters are instantiated\nwith `shuffle=False` so the splits will be the same across calls.\n\nRefer :ref:`User Guide ` for the various\ncross-validation strategies that can be used here.\n\n.. note::\n Due to implementation details, the folds produced by `cv` must be\n the same across multiple calls to `cv.split()`. For\n built-in `scikit-learn` iterators, this can be achieved by\n deactivating shuffling (`shuffle=False`), or by setting the\n `cv`'s `random_state` parameter to an integer." @@ -179629,7 +178870,7 @@ "qname": "sklearn.model_selection._search_successive_halving.HalvingGridSearchCV.__init__.scoring", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str, callable, or None, default=None", "description": "A single string (see :ref:`scoring_parameter`) or a callable\n(see :ref:`scoring`) to evaluate the predictions on the test set.\nIf None, the estimator's score method is used." @@ -179658,7 +178899,7 @@ "qname": "sklearn.model_selection._search_successive_halving.HalvingGridSearchCV.__init__.refit", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "If True, refit an estimator using the best found parameters on the\nwhole dataset.\n\nThe refitted estimator is made available at the ``best_estimator_``\nattribute and permits using ``predict`` directly on this\n``HalvingGridSearchCV`` instance." @@ -179674,7 +178915,7 @@ "qname": "sklearn.model_selection._search_successive_halving.HalvingGridSearchCV.__init__.error_score", "default_value": "np.nan", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "'raise' or numeric", "description": "Value to assign to the score if an error occurs in estimator fitting.\nIf set to 'raise', the error is raised. If a numeric value is given,\nFitFailedWarning is raised. This parameter does not affect the refit\nstep, which will always raise the error. Default is ``np.nan``." @@ -179699,7 +178940,7 @@ "qname": "sklearn.model_selection._search_successive_halving.HalvingGridSearchCV.__init__.return_train_score", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "If ``False``, the ``cv_results_`` attribute will not include training\nscores.\nComputing training scores is used to get insights on how different\nparameter settings impact the overfitting/underfitting trade-off.\nHowever computing the scores on the training set can be computationally\nexpensive and is not strictly required to select the parameters that\nyield the best generalization performance." 
@@ -179715,7 +178956,7 @@ "qname": "sklearn.model_selection._search_successive_halving.HalvingGridSearchCV.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Pseudo random number generator state used for subsampling the dataset\nwhen `resources != 'n_samples'`. Ignored otherwise.\nPass an int for reproducible output across multiple function calls.\nSee :term:`Glossary `." @@ -179744,7 +178985,7 @@ "qname": "sklearn.model_selection._search_successive_halving.HalvingGridSearchCV.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or None, default=None", "description": "Number of jobs to run in parallel.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details." @@ -179769,7 +179010,7 @@ "qname": "sklearn.model_selection._search_successive_halving.HalvingGridSearchCV.__init__.verbose", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int", "description": "Controls the verbosity: the higher, the more messages." @@ -179781,7 +179022,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -179822,7 +179063,7 @@ "qname": "sklearn.model_selection._search_successive_halving.HalvingRandomSearchCV.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -179835,7 +179076,7 @@ "qname": "sklearn.model_selection._search_successive_halving.HalvingRandomSearchCV.__init__.estimator", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "estimator object", "description": "This is assumed to implement the scikit-learn estimator interface.\nEither estimator needs to provide a ``score`` function,\nor ``scoring`` must be passed." @@ -179851,7 +179092,7 @@ "qname": "sklearn.model_selection._search_successive_halving.HalvingRandomSearchCV.__init__.param_distributions", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "dict", "description": "Dictionary with parameters names (string) as keys and distributions\nor lists of parameters to try. Distributions must provide a ``rvs``\nmethod for sampling (such as those from scipy.stats.distributions).\nIf a list is given, it is sampled uniformly." @@ -179867,7 +179108,7 @@ "qname": "sklearn.model_selection._search_successive_halving.HalvingRandomSearchCV.__init__.n_candidates", "default_value": "'exhaust'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default='exhaust'", "description": "The number of candidate parameters to sample, at the first\niteration. Using 'exhaust' will sample enough candidates so that the\nlast iteration uses as many resources as possible, based on\n`min_resources`, `max_resources` and `factor`. In this case,\n`min_resources` cannot be 'exhaust'." 
@@ -179883,7 +179124,7 @@ "qname": "sklearn.model_selection._search_successive_halving.HalvingRandomSearchCV.__init__.factor", "default_value": "3", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or float, default=3", "description": "The 'halving' parameter, which determines the proportion of candidates\nthat are selected for each subsequent iteration. For example,\n``factor=3`` means that only one third of the candidates are selected." @@ -179908,7 +179149,7 @@ "qname": "sklearn.model_selection._search_successive_halving.HalvingRandomSearchCV.__init__.resource", "default_value": "'n_samples'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "``'n_samples'`` or str, default='n_samples'", "description": "Defines the resource that increases with each iteration. By default,\nthe resource is the number of samples. It can also be set to any\nparameter of the base estimator that accepts positive integer\nvalues, e.g. 'n_iterations' or 'n_estimators' for a gradient\nboosting estimator. In this case ``max_resources`` cannot be 'auto'\nand must be set explicitly." @@ -179933,7 +179174,7 @@ "qname": "sklearn.model_selection._search_successive_halving.HalvingRandomSearchCV.__init__.max_resources", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default='auto'", "description": "The maximum number of resources that any candidate is allowed to use\nfor a given iteration. By default, this is set ``n_samples`` when\n``resource='n_samples'`` (default), else an error is raised." @@ -179949,7 +179190,7 @@ "qname": "sklearn.model_selection._search_successive_halving.HalvingRandomSearchCV.__init__.min_resources", "default_value": "'smallest'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'exhaust', 'smallest'} or int, default='smallest'", "description": "The minimum amount of resource that any candidate is allowed to use\nfor a given iteration. Equivalently, this defines the amount of\nresources `r0` that are allocated for each candidate at the first\niteration.\n\n- 'smallest' is a heuristic that sets `r0` to a small value:\n\n - ``n_splits * 2`` when ``resource='n_samples'`` for a regression\n problem\n - ``n_classes * n_splits * 2`` when ``resource='n_samples'`` for a\n classification problem\n - ``1`` when ``resource != 'n_samples'``\n\n- 'exhaust' will set `r0` such that the **last** iteration uses as\n much resources as possible. Namely, the last iteration will use the\n highest value smaller than ``max_resources`` that is a multiple of\n both ``min_resources`` and ``factor``. In general, using 'exhaust'\n leads to a more accurate estimator, but is slightly more time\n consuming. 'exhaust' isn't available when `n_candidates='exhaust'`.\n\nNote that the amount of resources used at each iteration is always a\nmultiple of ``min_resources``." 
@@ -179959,7 +179200,7 @@ "types": [ { "kind": "EnumType", - "values": ["smallest", "exhaust"] + "values": ["exhaust", "smallest"] }, { "kind": "NamedType", @@ -179974,7 +179215,7 @@ "qname": "sklearn.model_selection._search_successive_halving.HalvingRandomSearchCV.__init__.aggressive_elimination", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "This is only relevant in cases where there isn't enough resources to\nreduce the remaining candidates to at most `factor` after the last\niteration. If ``True``, then the search process will 'replay' the\nfirst iteration for as long as needed until the number of candidates\nis small enough. This is ``False`` by default, which means that the\nlast iteration may evaluate more than ``factor`` candidates. See\n:ref:`aggressive_elimination` for more details." @@ -179990,7 +179231,7 @@ "qname": "sklearn.model_selection._search_successive_halving.HalvingRandomSearchCV.__init__.cv", "default_value": "5", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, cross-validation generator or an iterable, default=5", "description": "Determines the cross-validation splitting strategy.\nPossible inputs for cv are:\n\n- integer, to specify the number of folds in a `(Stratified)KFold`,\n- :term:`CV splitter`,\n- An iterable yielding (train, test) splits as arrays of indices.\n\nFor integer/None inputs, if the estimator is a classifier and ``y`` is\neither binary or multiclass, :class:`StratifiedKFold` is used. In all\nother cases, :class:`KFold` is used. These splitters are instantiated\nwith `shuffle=False` so the splits will be the same across calls.\n\nRefer :ref:`User Guide ` for the various\ncross-validation strategies that can be used here.\n\n.. note::\n Due to implementation details, the folds produced by `cv` must be\n the same across multiple calls to `cv.split()`. For\n built-in `scikit-learn` iterators, this can be achieved by\n deactivating shuffling (`shuffle=False`), or by setting the\n `cv`'s `random_state` parameter to an integer." @@ -180019,7 +179260,7 @@ "qname": "sklearn.model_selection._search_successive_halving.HalvingRandomSearchCV.__init__.scoring", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str, callable, or None, default=None", "description": "A single string (see :ref:`scoring_parameter`) or a callable\n(see :ref:`scoring`) to evaluate the predictions on the test set.\nIf None, the estimator's score method is used." @@ -180048,7 +179289,7 @@ "qname": "sklearn.model_selection._search_successive_halving.HalvingRandomSearchCV.__init__.refit", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "If True, refit an estimator using the best found parameters on the\nwhole dataset.\n\nThe refitted estimator is made available at the ``best_estimator_``\nattribute and permits using ``predict`` directly on this\n``HalvingRandomSearchCV`` instance." 
@@ -180064,7 +179305,7 @@ "qname": "sklearn.model_selection._search_successive_halving.HalvingRandomSearchCV.__init__.error_score", "default_value": "np.nan", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "'raise' or numeric", "description": "Value to assign to the score if an error occurs in estimator fitting.\nIf set to 'raise', the error is raised. If a numeric value is given,\nFitFailedWarning is raised. This parameter does not affect the refit\nstep, which will always raise the error. Default is ``np.nan``." @@ -180089,7 +179330,7 @@ "qname": "sklearn.model_selection._search_successive_halving.HalvingRandomSearchCV.__init__.return_train_score", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "If ``False``, the ``cv_results_`` attribute will not include training\nscores.\nComputing training scores is used to get insights on how different\nparameter settings impact the overfitting/underfitting trade-off.\nHowever computing the scores on the training set can be computationally\nexpensive and is not strictly required to select the parameters that\nyield the best generalization performance." @@ -180105,7 +179346,7 @@ "qname": "sklearn.model_selection._search_successive_halving.HalvingRandomSearchCV.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Pseudo random number generator state used for subsampling the dataset\nwhen `resources != 'n_samples'`. Also used for random uniform\nsampling from lists of possible values instead of scipy.stats\ndistributions.\nPass an int for reproducible output across multiple function calls.\nSee :term:`Glossary `." @@ -180134,7 +179375,7 @@ "qname": "sklearn.model_selection._search_successive_halving.HalvingRandomSearchCV.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or None, default=None", "description": "Number of jobs to run in parallel.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details." @@ -180159,7 +179400,7 @@ "qname": "sklearn.model_selection._search_successive_halving.HalvingRandomSearchCV.__init__.verbose", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int", "description": "Controls the verbosity: the higher, the more messages." 
@@ -180171,7 +179412,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -180404,7 +179645,7 @@ "qname": "sklearn.model_selection._split.BaseCrossValidator.__repr__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -180413,7 +179654,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -180557,7 +179798,7 @@ "qname": "sklearn.model_selection._split.BaseCrossValidator.get_n_splits.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -180570,7 +179811,7 @@ "qname": "sklearn.model_selection._split.BaseCrossValidator.get_n_splits.X", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -180583,7 +179824,7 @@ "qname": "sklearn.model_selection._split.BaseCrossValidator.get_n_splits.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -180596,7 +179837,7 @@ "qname": "sklearn.model_selection._split.BaseCrossValidator.get_n_splits.groups", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -180605,7 +179846,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Returns the number of splitting iterations in the cross-validator", "docstring": "Returns the number of splitting iterations in the cross-validator" }, @@ -180621,7 +179862,7 @@ "qname": "sklearn.model_selection._split.BaseCrossValidator.split.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -180634,7 +179875,7 @@ "qname": "sklearn.model_selection._split.BaseCrossValidator.split.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Training data, where `n_samples` is the number of samples\nand `n_features` is the number of features." @@ -180650,7 +179891,7 @@ "qname": "sklearn.model_selection._split.BaseCrossValidator.split.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "The target variable for supervised learning problems." @@ -180666,7 +179907,7 @@ "qname": "sklearn.model_selection._split.BaseCrossValidator.split.groups", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,), default=None", "description": "Group labels for the samples used while splitting the dataset into\ntrain/test set." 
@@ -180678,7 +179919,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Generate indices to split data into training and test set.", "docstring": "Generate indices to split data into training and test set.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n y : array-like of shape (n_samples,)\n The target variable for supervised learning problems.\n\n groups : array-like of shape (n_samples,), default=None\n Group labels for the samples used while splitting the dataset into\n train/test set.\n\n Yields\n ------\n train : ndarray\n The training set indices for that split.\n\n test : ndarray\n The testing set indices for that split.\n " }, @@ -180694,7 +179935,7 @@ "qname": "sklearn.model_selection._split.BaseShuffleSplit.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -180707,7 +179948,7 @@ "qname": "sklearn.model_selection._split.BaseShuffleSplit.__init__.n_splits", "default_value": "10", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -180720,7 +179961,7 @@ "qname": "sklearn.model_selection._split.BaseShuffleSplit.__init__.test_size", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -180733,7 +179974,7 @@ "qname": "sklearn.model_selection._split.BaseShuffleSplit.__init__.train_size", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -180746,7 +179987,7 @@ "qname": "sklearn.model_selection._split.BaseShuffleSplit.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -180755,7 +179996,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -180771,7 +180012,7 @@ "qname": "sklearn.model_selection._split.BaseShuffleSplit.__repr__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -180780,7 +180021,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -180860,7 +180101,7 @@ "qname": "sklearn.model_selection._split.BaseShuffleSplit.get_n_splits.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -180873,7 +180114,7 @@ "qname": "sklearn.model_selection._split.BaseShuffleSplit.get_n_splits.X", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "object", "description": "Always ignored, exists for compatibility." @@ -180889,7 +180130,7 @@ "qname": "sklearn.model_selection._split.BaseShuffleSplit.get_n_splits.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "object", "description": "Always ignored, exists for compatibility." 
@@ -180905,7 +180146,7 @@ "qname": "sklearn.model_selection._split.BaseShuffleSplit.get_n_splits.groups", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "object", "description": "Always ignored, exists for compatibility." @@ -180917,7 +180158,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Returns the number of splitting iterations in the cross-validator", "docstring": "Returns the number of splitting iterations in the cross-validator\n\n Parameters\n ----------\n X : object\n Always ignored, exists for compatibility.\n\n y : object\n Always ignored, exists for compatibility.\n\n groups : object\n Always ignored, exists for compatibility.\n\n Returns\n -------\n n_splits : int\n Returns the number of splitting iterations in the cross-validator.\n " }, @@ -180933,7 +180174,7 @@ "qname": "sklearn.model_selection._split.BaseShuffleSplit.split.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -180946,7 +180187,7 @@ "qname": "sklearn.model_selection._split.BaseShuffleSplit.split.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Training data, where `n_samples` is the number of samples\nand `n_features` is the number of features." @@ -180962,7 +180203,7 @@ "qname": "sklearn.model_selection._split.BaseShuffleSplit.split.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "The target variable for supervised learning problems." @@ -180978,7 +180219,7 @@ "qname": "sklearn.model_selection._split.BaseShuffleSplit.split.groups", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,), default=None", "description": "Group labels for the samples used while splitting the dataset into\ntrain/test set." @@ -180990,7 +180231,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Generate indices to split data into training and test set.", "docstring": "Generate indices to split data into training and test set.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n y : array-like of shape (n_samples,)\n The target variable for supervised learning problems.\n\n groups : array-like of shape (n_samples,), default=None\n Group labels for the samples used while splitting the dataset into\n train/test set.\n\n Yields\n ------\n train : ndarray\n The training set indices for that split.\n\n test : ndarray\n The testing set indices for that split.\n\n Notes\n -----\n Randomized CV splitters may return different results for each call of\n split. 
You can make the results identical by setting `random_state`\n to an integer.\n " }, @@ -181006,7 +180247,7 @@ "qname": "sklearn.model_selection._split.GroupKFold.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -181019,7 +180260,7 @@ "qname": "sklearn.model_selection._split.GroupKFold.__init__.n_splits", "default_value": "5", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=5", "description": "Number of folds. Must be at least 2.\n\n.. versionchanged:: 0.22\n ``n_splits`` default value changed from 3 to 5." @@ -181031,7 +180272,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -181111,7 +180352,7 @@ "qname": "sklearn.model_selection._split.GroupKFold.split.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -181124,7 +180365,7 @@ "qname": "sklearn.model_selection._split.GroupKFold.split.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Training data, where `n_samples` is the number of samples\nand `n_features` is the number of features." @@ -181140,7 +180381,7 @@ "qname": "sklearn.model_selection._split.GroupKFold.split.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,), default=None", "description": "The target variable for supervised learning problems." @@ -181156,7 +180397,7 @@ "qname": "sklearn.model_selection._split.GroupKFold.split.groups", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "Group labels for the samples used while splitting the dataset into\ntrain/test set." @@ -181168,7 +180409,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Generate indices to split data into training and test set.", "docstring": "Generate indices to split data into training and test set.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n y : array-like of shape (n_samples,), default=None\n The target variable for supervised learning problems.\n\n groups : array-like of shape (n_samples,)\n Group labels for the samples used while splitting the dataset into\n train/test set.\n\n Yields\n ------\n train : ndarray\n The training set indices for that split.\n\n test : ndarray\n The testing set indices for that split.\n " }, @@ -181184,7 +180425,7 @@ "qname": "sklearn.model_selection._split.GroupShuffleSplit.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -181197,7 +180438,7 @@ "qname": "sklearn.model_selection._split.GroupShuffleSplit.__init__.n_splits", "default_value": "5", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=5", "description": "Number of re-shuffling & splitting iterations." 
@@ -181213,7 +180454,7 @@ "qname": "sklearn.model_selection._split.GroupShuffleSplit.__init__.test_size", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, int, default=0.2", "description": "If float, should be between 0.0 and 1.0 and represent the proportion\nof groups to include in the test split (rounded up). If int,\nrepresents the absolute number of test groups. If None, the value is\nset to the complement of the train size.\nThe default will change in version 0.21. It will remain 0.2 only\nif ``train_size`` is unspecified, otherwise it will complement\nthe specified ``train_size``." @@ -181238,7 +180479,7 @@ "qname": "sklearn.model_selection._split.GroupShuffleSplit.__init__.train_size", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float or int, default=None", "description": "If float, should be between 0.0 and 1.0 and represent the\nproportion of the groups to include in the train split. If\nint, represents the absolute number of train groups. If None,\nthe value is automatically set to the complement of the test size." @@ -181263,7 +180504,7 @@ "qname": "sklearn.model_selection._split.GroupShuffleSplit.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Controls the randomness of the training and testing indices produced.\nPass an int for reproducible output across multiple function calls.\nSee :term:`Glossary `." @@ -181288,7 +180529,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -181368,7 +180609,7 @@ "qname": "sklearn.model_selection._split.GroupShuffleSplit.split.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -181381,7 +180622,7 @@ "qname": "sklearn.model_selection._split.GroupShuffleSplit.split.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Training data, where `n_samples` is the number of samples\nand `n_features` is the number of features." @@ -181397,7 +180638,7 @@ "qname": "sklearn.model_selection._split.GroupShuffleSplit.split.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,), default=None", "description": "The target variable for supervised learning problems." @@ -181413,7 +180654,7 @@ "qname": "sklearn.model_selection._split.GroupShuffleSplit.split.groups", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "Group labels for the samples used while splitting the dataset into\ntrain/test set." 
@@ -181425,7 +180666,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Generate indices to split data into training and test set.", "docstring": "Generate indices to split data into training and test set.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n y : array-like of shape (n_samples,), default=None\n The target variable for supervised learning problems.\n\n groups : array-like of shape (n_samples,)\n Group labels for the samples used while splitting the dataset into\n train/test set.\n\n Yields\n ------\n train : ndarray\n The training set indices for that split.\n\n test : ndarray\n The testing set indices for that split.\n\n Notes\n -----\n Randomized CV splitters may return different results for each call of\n split. You can make the results identical by setting `random_state`\n to an integer.\n " }, @@ -181441,7 +180682,7 @@ "qname": "sklearn.model_selection._split.KFold.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -181454,7 +180695,7 @@ "qname": "sklearn.model_selection._split.KFold.__init__.n_splits", "default_value": "5", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=5", "description": "Number of folds. Must be at least 2.\n\n.. versionchanged:: 0.22\n ``n_splits`` default value changed from 3 to 5." @@ -181470,7 +180711,7 @@ "qname": "sklearn.model_selection._split.KFold.__init__.shuffle", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Whether to shuffle the data before splitting into batches.\nNote that the samples within each split will not be shuffled." @@ -181486,7 +180727,7 @@ "qname": "sklearn.model_selection._split.KFold.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "When `shuffle` is True, `random_state` affects the ordering of the\nindices, which controls the randomness of each fold. Otherwise, this\nparameter has no effect.\nPass an int for reproducible output across multiple function calls.\nSee :term:`Glossary `." @@ -181511,7 +180752,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -181655,7 +180896,7 @@ "qname": "sklearn.model_selection._split.LeaveOneGroupOut.get_n_splits.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -181668,7 +180909,7 @@ "qname": "sklearn.model_selection._split.LeaveOneGroupOut.get_n_splits.X", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "object", "description": "Always ignored, exists for compatibility." @@ -181684,7 +180925,7 @@ "qname": "sklearn.model_selection._split.LeaveOneGroupOut.get_n_splits.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "object", "description": "Always ignored, exists for compatibility." 
@@ -181700,7 +180941,7 @@ "qname": "sklearn.model_selection._split.LeaveOneGroupOut.get_n_splits.groups", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "Group labels for the samples used while splitting the dataset into\ntrain/test set. This 'groups' parameter must always be specified to\ncalculate the number of splits, though the other parameters can be\nomitted." @@ -181712,7 +180953,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Returns the number of splitting iterations in the cross-validator", "docstring": "Returns the number of splitting iterations in the cross-validator\n\n Parameters\n ----------\n X : object\n Always ignored, exists for compatibility.\n\n y : object\n Always ignored, exists for compatibility.\n\n groups : array-like of shape (n_samples,)\n Group labels for the samples used while splitting the dataset into\n train/test set. This 'groups' parameter must always be specified to\n calculate the number of splits, though the other parameters can be\n omitted.\n\n Returns\n -------\n n_splits : int\n Returns the number of splitting iterations in the cross-validator.\n " }, @@ -181728,7 +180969,7 @@ "qname": "sklearn.model_selection._split.LeaveOneGroupOut.split.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -181741,7 +180982,7 @@ "qname": "sklearn.model_selection._split.LeaveOneGroupOut.split.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Training data, where `n_samples` is the number of samples\nand `n_features` is the number of features." @@ -181757,7 +180998,7 @@ "qname": "sklearn.model_selection._split.LeaveOneGroupOut.split.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,), default=None", "description": "The target variable for supervised learning problems." @@ -181773,7 +181014,7 @@ "qname": "sklearn.model_selection._split.LeaveOneGroupOut.split.groups", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "Group labels for the samples used while splitting the dataset into\ntrain/test set." 
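For the LeaveOneGroupOut entries above, the docstrings require `groups` for `get_n_splits` and `split`. A small sketch under made-up data:

# Sketch of LeaveOneGroupOut.get_n_splits / split as documented above.
import numpy as np
from sklearn.model_selection import LeaveOneGroupOut

X = np.array([[1], [2], [3], [4]])
y = np.array([0, 1, 0, 1])
groups = np.array([1, 1, 2, 2])

logo = LeaveOneGroupOut()
print(logo.get_n_splits(groups=groups))  # one split per distinct group -> 2
for train_idx, test_idx in logo.split(X, y, groups=groups):
    print(train_idx, test_idx)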
@@ -181785,7 +181026,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Generate indices to split data into training and test set.", "docstring": "Generate indices to split data into training and test set.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n y : array-like of shape (n_samples,), default=None\n The target variable for supervised learning problems.\n\n groups : array-like of shape (n_samples,)\n Group labels for the samples used while splitting the dataset into\n train/test set.\n\n Yields\n ------\n train : ndarray\n The training set indices for that split.\n\n test : ndarray\n The testing set indices for that split.\n " }, @@ -181865,7 +181106,7 @@ "qname": "sklearn.model_selection._split.LeaveOneOut.get_n_splits.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -181878,7 +181119,7 @@ "qname": "sklearn.model_selection._split.LeaveOneOut.get_n_splits.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Training data, where `n_samples` is the number of samples\nand `n_features` is the number of features." @@ -181894,7 +181135,7 @@ "qname": "sklearn.model_selection._split.LeaveOneOut.get_n_splits.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "object", "description": "Always ignored, exists for compatibility." @@ -181910,7 +181151,7 @@ "qname": "sklearn.model_selection._split.LeaveOneOut.get_n_splits.groups", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "object", "description": "Always ignored, exists for compatibility." @@ -181922,7 +181163,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Returns the number of splitting iterations in the cross-validator", "docstring": "Returns the number of splitting iterations in the cross-validator\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n y : object\n Always ignored, exists for compatibility.\n\n groups : object\n Always ignored, exists for compatibility.\n\n Returns\n -------\n n_splits : int\n Returns the number of splitting iterations in the cross-validator.\n " }, @@ -181938,7 +181179,7 @@ "qname": "sklearn.model_selection._split.LeavePGroupsOut.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -181951,7 +181192,7 @@ "qname": "sklearn.model_selection._split.LeavePGroupsOut.__init__.n_groups", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int", "description": "Number of groups (``p``) to leave out in the test split." 
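The LeaveOneOut entries above note that `get_n_splits` needs `X` while `y` and `groups` are ignored. A minimal sketch with placeholder data:

# Sketch of LeaveOneOut as documented above: one split per sample.
import numpy as np
from sklearn.model_selection import LeaveOneOut

X = np.array([[1], [2], [3], [4]])
loo = LeaveOneOut()
print(loo.get_n_splits(X))  # 4: one split per sample
for train_idx, test_idx in loo.split(X):
    print(train_idx, test_idx)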
@@ -181963,7 +181204,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -182043,7 +181284,7 @@ "qname": "sklearn.model_selection._split.LeavePGroupsOut.get_n_splits.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -182056,7 +181297,7 @@ "qname": "sklearn.model_selection._split.LeavePGroupsOut.get_n_splits.X", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "object", "description": "Always ignored, exists for compatibility." @@ -182072,7 +181313,7 @@ "qname": "sklearn.model_selection._split.LeavePGroupsOut.get_n_splits.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "object", "description": "Always ignored, exists for compatibility." @@ -182088,7 +181329,7 @@ "qname": "sklearn.model_selection._split.LeavePGroupsOut.get_n_splits.groups", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "Group labels for the samples used while splitting the dataset into\ntrain/test set. This 'groups' parameter must always be specified to\ncalculate the number of splits, though the other parameters can be\nomitted." @@ -182100,7 +181341,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Returns the number of splitting iterations in the cross-validator", "docstring": "Returns the number of splitting iterations in the cross-validator\n\n Parameters\n ----------\n X : object\n Always ignored, exists for compatibility.\n\n y : object\n Always ignored, exists for compatibility.\n\n groups : array-like of shape (n_samples,)\n Group labels for the samples used while splitting the dataset into\n train/test set. This 'groups' parameter must always be specified to\n calculate the number of splits, though the other parameters can be\n omitted.\n\n Returns\n -------\n n_splits : int\n Returns the number of splitting iterations in the cross-validator.\n " }, @@ -182116,7 +181357,7 @@ "qname": "sklearn.model_selection._split.LeavePGroupsOut.split.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -182129,7 +181370,7 @@ "qname": "sklearn.model_selection._split.LeavePGroupsOut.split.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Training data, where `n_samples` is the number of samples\nand `n_features` is the number of features." @@ -182145,7 +181386,7 @@ "qname": "sklearn.model_selection._split.LeavePGroupsOut.split.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,), default=None", "description": "The target variable for supervised learning problems." @@ -182161,7 +181402,7 @@ "qname": "sklearn.model_selection._split.LeavePGroupsOut.split.groups", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "Group labels for the samples used while splitting the dataset into\ntrain/test set." 
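The LeavePGroupsOut entries above cover `n_groups` plus the `groups`-driven `get_n_splits`/`split`. A short sketch, with illustrative arrays only:

# Sketch of LeavePGroupsOut with n_groups=2, per the parameter docs above.
import numpy as np
from sklearn.model_selection import LeavePGroupsOut

X = np.array([[1], [2], [3], [4], [5], [6]])
y = np.array([0, 1, 0, 1, 0, 1])
groups = np.array([1, 1, 2, 2, 3, 3])

lpgo = LeavePGroupsOut(n_groups=2)
print(lpgo.get_n_splits(groups=groups))  # C(3, 2) = 3 group combinations
for train_idx, test_idx in lpgo.split(X, y, groups=groups):
    print(train_idx, test_idx)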
@@ -182173,7 +181414,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Generate indices to split data into training and test set.", "docstring": "Generate indices to split data into training and test set.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n y : array-like of shape (n_samples,), default=None\n The target variable for supervised learning problems.\n\n groups : array-like of shape (n_samples,)\n Group labels for the samples used while splitting the dataset into\n train/test set.\n\n Yields\n ------\n train : ndarray\n The training set indices for that split.\n\n test : ndarray\n The testing set indices for that split.\n " }, @@ -182189,7 +181430,7 @@ "qname": "sklearn.model_selection._split.LeavePOut.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -182202,7 +181443,7 @@ "qname": "sklearn.model_selection._split.LeavePOut.__init__.p", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int", "description": "Size of the test sets. Must be strictly less than the number of\nsamples." @@ -182214,7 +181455,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -182294,7 +181535,7 @@ "qname": "sklearn.model_selection._split.LeavePOut.get_n_splits.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -182307,7 +181548,7 @@ "qname": "sklearn.model_selection._split.LeavePOut.get_n_splits.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Training data, where `n_samples` is the number of samples\nand `n_features` is the number of features." @@ -182323,7 +181564,7 @@ "qname": "sklearn.model_selection._split.LeavePOut.get_n_splits.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "object", "description": "Always ignored, exists for compatibility." @@ -182339,7 +181580,7 @@ "qname": "sklearn.model_selection._split.LeavePOut.get_n_splits.groups", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "object", "description": "Always ignored, exists for compatibility." 
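The LeavePOut entries above describe `p` as the test-set size. A hedged sketch with a placeholder array:

# Sketch of LeavePOut(p=2) as documented above: exhaustive test sets of size p.
import numpy as np
from sklearn.model_selection import LeavePOut

X = np.array([[1], [2], [3], [4]])
lpo = LeavePOut(p=2)
print(lpo.get_n_splits(X))  # C(4, 2) = 6 splits
for train_idx, test_idx in lpo.split(X):
    print(train_idx, test_idx)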
@@ -182351,7 +181592,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Returns the number of splitting iterations in the cross-validator", "docstring": "Returns the number of splitting iterations in the cross-validator\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n y : object\n Always ignored, exists for compatibility.\n\n groups : object\n Always ignored, exists for compatibility.\n " }, @@ -182367,7 +181608,7 @@ "qname": "sklearn.model_selection._split.PredefinedSplit.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -182380,7 +181621,7 @@ "qname": "sklearn.model_selection._split.PredefinedSplit.__init__.test_fold", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "The entry ``test_fold[i]`` represents the index of the test set that\nsample ``i`` belongs to. It is possible to exclude sample ``i`` from\nany test set (i.e. include sample ``i`` in every training set) by\nsetting ``test_fold[i]`` equal to -1." @@ -182392,7 +181633,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -182433,7 +181674,7 @@ "qname": "sklearn.model_selection._split.PredefinedSplit.get_n_splits.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -182446,7 +181687,7 @@ "qname": "sklearn.model_selection._split.PredefinedSplit.get_n_splits.X", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "object", "description": "Always ignored, exists for compatibility." @@ -182462,7 +181703,7 @@ "qname": "sklearn.model_selection._split.PredefinedSplit.get_n_splits.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "object", "description": "Always ignored, exists for compatibility." @@ -182478,7 +181719,7 @@ "qname": "sklearn.model_selection._split.PredefinedSplit.get_n_splits.groups", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "object", "description": "Always ignored, exists for compatibility." 
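The PredefinedSplit entries above explain `test_fold`: entry `i` names the test set of sample `i`, and `-1` keeps the sample in every training set. A sketch under those rules (values are placeholders):

# Sketch of PredefinedSplit driven by the test_fold array described above.
import numpy as np
from sklearn.model_selection import PredefinedSplit

X = np.array([[1], [2], [3], [4]])
test_fold = np.array([0, 1, -1, 1])  # sample 2 stays in every training set
ps = PredefinedSplit(test_fold)
print(ps.get_n_splits())  # 2 distinct non-negative fold indices
for train_idx, test_idx in ps.split():
    print(train_idx, test_idx)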
@@ -182490,7 +181731,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Returns the number of splitting iterations in the cross-validator", "docstring": "Returns the number of splitting iterations in the cross-validator\n\n Parameters\n ----------\n X : object\n Always ignored, exists for compatibility.\n\n y : object\n Always ignored, exists for compatibility.\n\n groups : object\n Always ignored, exists for compatibility.\n\n Returns\n -------\n n_splits : int\n Returns the number of splitting iterations in the cross-validator.\n " }, @@ -182506,7 +181747,7 @@ "qname": "sklearn.model_selection._split.PredefinedSplit.split.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -182519,7 +181760,7 @@ "qname": "sklearn.model_selection._split.PredefinedSplit.split.X", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "object", "description": "Always ignored, exists for compatibility." @@ -182535,7 +181776,7 @@ "qname": "sklearn.model_selection._split.PredefinedSplit.split.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "object", "description": "Always ignored, exists for compatibility." @@ -182551,7 +181792,7 @@ "qname": "sklearn.model_selection._split.PredefinedSplit.split.groups", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "object", "description": "Always ignored, exists for compatibility." @@ -182563,7 +181804,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Generate indices to split data into training and test set.", "docstring": "Generate indices to split data into training and test set.\n\n Parameters\n ----------\n X : object\n Always ignored, exists for compatibility.\n\n y : object\n Always ignored, exists for compatibility.\n\n groups : object\n Always ignored, exists for compatibility.\n\n Yields\n ------\n train : ndarray\n The training set indices for that split.\n\n test : ndarray\n The testing set indices for that split.\n " }, @@ -182579,7 +181820,7 @@ "qname": "sklearn.model_selection._split.RepeatedKFold.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -182592,7 +181833,7 @@ "qname": "sklearn.model_selection._split.RepeatedKFold.__init__.n_splits", "default_value": "5", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=5", "description": "Number of folds. Must be at least 2." @@ -182608,7 +181849,7 @@ "qname": "sklearn.model_selection._split.RepeatedKFold.__init__.n_repeats", "default_value": "10", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=10", "description": "Number of times cross-validator needs to be repeated." 
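The RepeatedKFold entries above (and the `random_state` entry that follows) document `n_splits` folds repeated `n_repeats` times. A minimal sketch with toy data:

# Sketch of RepeatedKFold as documented above: n_splits x n_repeats splits.
import numpy as np
from sklearn.model_selection import RepeatedKFold

X = np.arange(8).reshape(4, 2)
rkf = RepeatedKFold(n_splits=2, n_repeats=3, random_state=0)
print(rkf.get_n_splits(X))  # 2 * 3 = 6
for train_idx, test_idx in rkf.split(X):
    print(train_idx, test_idx)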
@@ -182624,7 +181865,7 @@ "qname": "sklearn.model_selection._split.RepeatedKFold.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Controls the randomness of each repeated cross-validation instance.\nPass an int for reproducible output across multiple function calls.\nSee :term:`Glossary `." @@ -182649,7 +181890,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -182665,7 +181906,7 @@ "qname": "sklearn.model_selection._split.RepeatedStratifiedKFold.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -182678,7 +181919,7 @@ "qname": "sklearn.model_selection._split.RepeatedStratifiedKFold.__init__.n_splits", "default_value": "5", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=5", "description": "Number of folds. Must be at least 2." @@ -182694,7 +181935,7 @@ "qname": "sklearn.model_selection._split.RepeatedStratifiedKFold.__init__.n_repeats", "default_value": "10", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=10", "description": "Number of times cross-validator needs to be repeated." @@ -182710,7 +181951,7 @@ "qname": "sklearn.model_selection._split.RepeatedStratifiedKFold.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Controls the generation of the random states for each repetition.\nPass an int for reproducible output across multiple function calls.\nSee :term:`Glossary `." @@ -182735,7 +181976,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -182751,7 +181992,7 @@ "qname": "sklearn.model_selection._split.ShuffleSplit.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -182764,7 +182005,7 @@ "qname": "sklearn.model_selection._split.ShuffleSplit.__init__.n_splits", "default_value": "10", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=10", "description": "Number of re-shuffling & splitting iterations." @@ -182780,7 +182021,7 @@ "qname": "sklearn.model_selection._split.ShuffleSplit.__init__.test_size", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float or int, default=None", "description": "If float, should be between 0.0 and 1.0 and represent the proportion\nof the dataset to include in the test split. If int, represents the\nabsolute number of test samples. If None, the value is set to the\ncomplement of the train size. If ``train_size`` is also None, it will\nbe set to 0.1." @@ -182805,7 +182046,7 @@ "qname": "sklearn.model_selection._split.ShuffleSplit.__init__.train_size", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float or int, default=None", "description": "If float, should be between 0.0 and 1.0 and represent the\nproportion of the dataset to include in the train split. 
If\nint, represents the absolute number of train samples. If None,\nthe value is automatically set to the complement of the test size." @@ -182830,7 +182071,7 @@ "qname": "sklearn.model_selection._split.ShuffleSplit.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Controls the randomness of the training and testing indices produced.\nPass an int for reproducible output across multiple function calls.\nSee :term:`Glossary `." @@ -182855,7 +182096,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -182935,7 +182176,7 @@ "qname": "sklearn.model_selection._split.StratifiedGroupKFold.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -182948,7 +182189,7 @@ "qname": "sklearn.model_selection._split.StratifiedGroupKFold.__init__.n_splits", "default_value": "5", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=5", "description": "Number of folds. Must be at least 2." @@ -182964,7 +182205,7 @@ "qname": "sklearn.model_selection._split.StratifiedGroupKFold.__init__.shuffle", "default_value": "False", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Whether to shuffle each class's samples before splitting into batches.\nNote that the samples within each split will not be shuffled.\nThis implementation can only shuffle groups that have approximately the\nsame y distribution, no global shuffle will be performed." @@ -182980,7 +182221,7 @@ "qname": "sklearn.model_selection._split.StratifiedGroupKFold.__init__.random_state", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int or RandomState instance, default=None", "description": "When `shuffle` is True, `random_state` affects the ordering of the\nindices, which controls the randomness of each fold for each class.\nOtherwise, leave `random_state` as `None`.\nPass an int for reproducible output across multiple function calls.\nSee :term:`Glossary `." @@ -183001,7 +182242,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -183145,7 +182386,7 @@ "qname": "sklearn.model_selection._split.StratifiedKFold.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -183158,7 +182399,7 @@ "qname": "sklearn.model_selection._split.StratifiedKFold.__init__.n_splits", "default_value": "5", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=5", "description": "Number of folds. Must be at least 2.\n\n.. versionchanged:: 0.22\n ``n_splits`` default value changed from 3 to 5." @@ -183174,7 +182415,7 @@ "qname": "sklearn.model_selection._split.StratifiedKFold.__init__.shuffle", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Whether to shuffle each class's samples before splitting into batches.\nNote that the samples within each split will not be shuffled." 
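The ShuffleSplit entries above describe `n_splits` independent re-shuffling iterations sized by `test_size`/`train_size`. A short sketch (data and values are placeholders):

# Sketch of ShuffleSplit as documented above.
import numpy as np
from sklearn.model_selection import ShuffleSplit

X = np.arange(20).reshape(10, 2)
ss = ShuffleSplit(n_splits=3, test_size=0.3, random_state=0)
for train_idx, test_idx in ss.split(X):
    # Each iteration draws a fresh random 70/30 train/test partition.
    print(train_idx, test_idx)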
@@ -183190,7 +182431,7 @@ "qname": "sklearn.model_selection._split.StratifiedKFold.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "When `shuffle` is True, `random_state` affects the ordering of the\nindices, which controls the randomness of each fold for each class.\nOtherwise, leave `random_state` as `None`.\nPass an int for reproducible output across multiple function calls.\nSee :term:`Glossary `." @@ -183215,7 +182456,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -183346,7 +182587,7 @@ "qname": "sklearn.model_selection._split.StratifiedKFold.split.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -183359,7 +182600,7 @@ "qname": "sklearn.model_selection._split.StratifiedKFold.split.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Training data, where `n_samples` is the number of samples\nand `n_features` is the number of features.\n\nNote that providing ``y`` is sufficient to generate the splits and\nhence ``np.zeros(n_samples)`` may be used as a placeholder for\n``X`` instead of actual training data." @@ -183375,7 +182616,7 @@ "qname": "sklearn.model_selection._split.StratifiedKFold.split.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "The target variable for supervised learning problems.\nStratification is done based on the y labels." @@ -183391,7 +182632,7 @@ "qname": "sklearn.model_selection._split.StratifiedKFold.split.groups", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "object", "description": "Always ignored, exists for compatibility." @@ -183403,7 +182644,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Generate indices to split data into training and test set.", "docstring": "Generate indices to split data into training and test set.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n Note that providing ``y`` is sufficient to generate the splits and\n hence ``np.zeros(n_samples)`` may be used as a placeholder for\n ``X`` instead of actual training data.\n\n y : array-like of shape (n_samples,)\n The target variable for supervised learning problems.\n Stratification is done based on the y labels.\n\n groups : object\n Always ignored, exists for compatibility.\n\n Yields\n ------\n train : ndarray\n The training set indices for that split.\n\n test : ndarray\n The testing set indices for that split.\n\n Notes\n -----\n Randomized CV splitters may return different results for each call of\n split. 
You can make the results identical by setting `random_state`\n to an integer.\n " }, @@ -183419,7 +182660,7 @@ "qname": "sklearn.model_selection._split.StratifiedShuffleSplit.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -183432,7 +182673,7 @@ "qname": "sklearn.model_selection._split.StratifiedShuffleSplit.__init__.n_splits", "default_value": "10", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=10", "description": "Number of re-shuffling & splitting iterations." @@ -183448,7 +182689,7 @@ "qname": "sklearn.model_selection._split.StratifiedShuffleSplit.__init__.test_size", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float or int, default=None", "description": "If float, should be between 0.0 and 1.0 and represent the proportion\nof the dataset to include in the test split. If int, represents the\nabsolute number of test samples. If None, the value is set to the\ncomplement of the train size. If ``train_size`` is also None, it will\nbe set to 0.1." @@ -183473,7 +182714,7 @@ "qname": "sklearn.model_selection._split.StratifiedShuffleSplit.__init__.train_size", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float or int, default=None", "description": "If float, should be between 0.0 and 1.0 and represent the\nproportion of the dataset to include in the train split. If\nint, represents the absolute number of train samples. If None,\nthe value is automatically set to the complement of the test size." @@ -183498,7 +182739,7 @@ "qname": "sklearn.model_selection._split.StratifiedShuffleSplit.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Controls the randomness of the training and testing indices produced.\nPass an int for reproducible output across multiple function calls.\nSee :term:`Glossary `." @@ -183523,7 +182764,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -183603,7 +182844,7 @@ "qname": "sklearn.model_selection._split.StratifiedShuffleSplit.split.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -183616,7 +182857,7 @@ "qname": "sklearn.model_selection._split.StratifiedShuffleSplit.split.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Training data, where `n_samples` is the number of samples\nand `n_features` is the number of features.\n\nNote that providing ``y`` is sufficient to generate the splits and\nhence ``np.zeros(n_samples)`` may be used as a placeholder for\n``X`` instead of actual training data." @@ -183632,7 +182873,7 @@ "qname": "sklearn.model_selection._split.StratifiedShuffleSplit.split.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,) or (n_samples, n_labels)", "description": "The target variable for supervised learning problems.\nStratification is done based on the y labels." 
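The StratifiedShuffleSplit entries above note that `y` alone determines the splits, so `np.zeros(n_samples)` may stand in for `X`. A hedged sketch of that documented behaviour (all values are placeholders):

# Sketch of StratifiedShuffleSplit as documented above.
import numpy as np
from sklearn.model_selection import StratifiedShuffleSplit

y = np.array([0, 0, 0, 1, 1, 1])
X = np.zeros(len(y))  # placeholder features, as the docstring allows
sss = StratifiedShuffleSplit(n_splits=4, test_size=0.5, random_state=0)
for train_idx, test_idx in sss.split(X, y):
    # Each split preserves the class proportions of y.
    print(train_idx, test_idx)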
@@ -183648,7 +182889,7 @@ "qname": "sklearn.model_selection._split.StratifiedShuffleSplit.split.groups", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "object", "description": "Always ignored, exists for compatibility." @@ -183660,7 +182901,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Generate indices to split data into training and test set.", "docstring": "Generate indices to split data into training and test set.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n Note that providing ``y`` is sufficient to generate the splits and\n hence ``np.zeros(n_samples)`` may be used as a placeholder for\n ``X`` instead of actual training data.\n\n y : array-like of shape (n_samples,) or (n_samples, n_labels)\n The target variable for supervised learning problems.\n Stratification is done based on the y labels.\n\n groups : object\n Always ignored, exists for compatibility.\n\n Yields\n ------\n train : ndarray\n The training set indices for that split.\n\n test : ndarray\n The testing set indices for that split.\n\n Notes\n -----\n Randomized CV splitters may return different results for each call of\n split. You can make the results identical by setting `random_state`\n to an integer.\n " }, @@ -183676,7 +182917,7 @@ "qname": "sklearn.model_selection._split.TimeSeriesSplit.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -183689,7 +182930,7 @@ "qname": "sklearn.model_selection._split.TimeSeriesSplit.__init__.n_splits", "default_value": "5", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=5", "description": "Number of splits. Must be at least 2.\n\n.. versionchanged:: 0.22\n ``n_splits`` default value changed from 3 to 5." @@ -183705,7 +182946,7 @@ "qname": "sklearn.model_selection._split.TimeSeriesSplit.__init__.max_train_size", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Maximum size for a single training set." @@ -183721,7 +182962,7 @@ "qname": "sklearn.model_selection._split.TimeSeriesSplit.__init__.test_size", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Used to limit the size of the test set. Defaults to\n``n_samples // (n_splits + 1)``, which is the maximum allowed value\nwith ``gap=0``.\n\n.. versionadded:: 0.24" @@ -183737,7 +182978,7 @@ "qname": "sklearn.model_selection._split.TimeSeriesSplit.__init__.gap", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=0", "description": "Number of samples to exclude from the end of each train set before\nthe test set.\n\n.. 
versionadded:: 0.24" @@ -183749,7 +182990,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -183765,7 +183006,7 @@ "qname": "sklearn.model_selection._split.TimeSeriesSplit.split.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -183778,7 +183019,7 @@ "qname": "sklearn.model_selection._split.TimeSeriesSplit.split.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Training data, where `n_samples` is the number of samples\nand `n_features` is the number of features." @@ -183794,7 +183035,7 @@ "qname": "sklearn.model_selection._split.TimeSeriesSplit.split.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "Always ignored, exists for compatibility." @@ -183810,7 +183051,7 @@ "qname": "sklearn.model_selection._split.TimeSeriesSplit.split.groups", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "Always ignored, exists for compatibility." @@ -183822,7 +183063,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Generate indices to split data into training and test set.", "docstring": "Generate indices to split data into training and test set.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n y : array-like of shape (n_samples,)\n Always ignored, exists for compatibility.\n\n groups : array-like of shape (n_samples,)\n Always ignored, exists for compatibility.\n\n Yields\n ------\n train : ndarray\n The training set indices for that split.\n\n test : ndarray\n The testing set indices for that split.\n " }, @@ -186170,7 +185411,7 @@ }, "type": { "kind": "EnumType", - "values": ["decision_function", "predict", "predict_proba", "predict_log_proba"] + "values": ["predict_log_proba", "predict_proba", "predict", "decision_function"] } } ], @@ -193999,7 +193240,7 @@ }, "type": { "kind": "EnumType", - "values": ["distance", "uniform"] + "values": ["uniform", "distance"] } } ], @@ -194206,66 +193447,6 @@ "description": "Helper for the Parallel calls in RadiusNeighborsMixin.radius_neighbors.\n\nThe Cython method tree.query_radius is not directly picklable by\ncloudpickle under PyPy.", "docstring": "Helper for the Parallel calls in RadiusNeighborsMixin.radius_neighbors.\n\n The Cython method tree.query_radius is not directly picklable by\n cloudpickle under PyPy.\n " }, - { - "id": "sklearn/sklearn.neighbors._base/sort_graph_by_row_values", - "name": "sort_graph_by_row_values", - "qname": "sklearn.neighbors._base.sort_graph_by_row_values", - "decorators": [], - "parameters": [ - { - "id": "sklearn/sklearn.neighbors._base/sort_graph_by_row_values/graph", - "name": "graph", - "qname": "sklearn.neighbors._base.sort_graph_by_row_values.graph", - "default_value": null, - "assigned_by": "POSITION_OR_NAME", - "is_public": true, - "docstring": { - "type": "sparse matrix of shape (n_samples, n_samples)", - "description": "Distance matrix to other samples, where only non-zero elements are\nconsidered 
neighbors. Matrix is converted to CSR format if not already." - }, - "type": { - "kind": "NamedType", - "name": "sparse matrix of shape (n_samples, n_samples)" - } - }, - { - "id": "sklearn/sklearn.neighbors._base/sort_graph_by_row_values/copy", - "name": "copy", - "qname": "sklearn.neighbors._base.sort_graph_by_row_values.copy", - "default_value": "False", - "assigned_by": "POSITION_OR_NAME", - "is_public": true, - "docstring": { - "type": "bool, default=False", - "description": "If True, the graph is copied before sorting. If False, the sorting is\nperformed inplace. If the graph is not of CSR format, `copy` must be\nTrue to allow the conversion to CSR format, otherwise an error is\nraised." - }, - "type": { - "kind": "NamedType", - "name": "bool" - } - }, - { - "id": "sklearn/sklearn.neighbors._base/sort_graph_by_row_values/warn_when_not_sorted", - "name": "warn_when_not_sorted", - "qname": "sklearn.neighbors._base.sort_graph_by_row_values.warn_when_not_sorted", - "default_value": "True", - "assigned_by": "POSITION_OR_NAME", - "is_public": true, - "docstring": { - "type": "bool, default=True", - "description": "If True, a :class:`~sklearn.exceptions.EfficiencyWarning` is raised\nwhen the input graph is not sorted by row values." - }, - "type": { - "kind": "NamedType", - "name": "bool" - } - } - ], - "results": [], - "is_public": true, - "description": "Sort a sparse graph such that each row is stored with increasing values.\n\n.. versionadded:: 1.2", - "docstring": "Sort a sparse graph such that each row is stored with increasing values.\n\n .. versionadded:: 1.2\n\n Parameters\n ----------\n graph : sparse matrix of shape (n_samples, n_samples)\n Distance matrix to other samples, where only non-zero elements are\n considered neighbors. Matrix is converted to CSR format if not already.\n\n copy : bool, default=False\n If True, the graph is copied before sorting. If False, the sorting is\n performed inplace. If the graph is not of CSR format, `copy` must be\n True to allow the conversion to CSR format, otherwise an error is\n raised.\n\n warn_when_not_sorted : bool, default=True\n If True, a :class:`~sklearn.exceptions.EfficiencyWarning` is raised\n when the input graph is not sorted by row values.\n\n Returns\n -------\n graph : sparse matrix of shape (n_samples, n_samples)\n Distance matrix to other samples, where only non-zero elements are\n considered neighbors. Matrix is in CSR format.\n " - }, { "id": "sklearn/sklearn.neighbors._classification/KNeighborsClassifier/__init__", "name": "__init__", @@ -194278,7 +193459,7 @@ "qname": "sklearn.neighbors._classification.KNeighborsClassifier.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -194291,7 +193472,7 @@ "qname": "sklearn.neighbors._classification.KNeighborsClassifier.__init__.n_neighbors", "default_value": "5", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=5", "description": "Number of neighbors to use by default for :meth:`kneighbors` queries." @@ -194307,7 +193488,7 @@ "qname": "sklearn.neighbors._classification.KNeighborsClassifier.__init__.weights", "default_value": "'uniform'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'uniform', 'distance'} or callable, default='uniform'", "description": "Weight function used in prediction. Possible values:\n\n- 'uniform' : uniform weights. 
All points in each neighborhood\n are weighted equally.\n- 'distance' : weight points by the inverse of their distance.\n in this case, closer neighbors of a query point will have a\n greater influence than neighbors which are further away.\n- [callable] : a user-defined function which accepts an\n array of distances, and returns an array of the same shape\n containing the weights." @@ -194317,7 +193498,7 @@ "types": [ { "kind": "EnumType", - "values": ["distance", "uniform"] + "values": ["uniform", "distance"] }, { "kind": "NamedType", @@ -194332,14 +193513,14 @@ "qname": "sklearn.neighbors._classification.KNeighborsClassifier.__init__.algorithm", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'", "description": "Algorithm used to compute the nearest neighbors:\n\n- 'ball_tree' will use :class:`BallTree`\n- 'kd_tree' will use :class:`KDTree`\n- 'brute' will use a brute-force search.\n- 'auto' will attempt to decide the most appropriate algorithm\n based on the values passed to :meth:`fit` method.\n\nNote: fitting on sparse input will override the setting of\nthis parameter, using brute force." }, "type": { "kind": "EnumType", - "values": ["auto", "kd_tree", "ball_tree", "brute"] + "values": ["auto", "kd_tree", "brute", "ball_tree"] } }, { @@ -194348,7 +193529,7 @@ "qname": "sklearn.neighbors._classification.KNeighborsClassifier.__init__.leaf_size", "default_value": "30", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=30", "description": "Leaf size passed to BallTree or KDTree. This can affect the\nspeed of the construction and query, as well as the memory\nrequired to store the tree. The optimal value depends on the\nnature of the problem." @@ -194364,7 +193545,7 @@ "qname": "sklearn.neighbors._classification.KNeighborsClassifier.__init__.p", "default_value": "2", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=2", "description": "Power parameter for the Minkowski metric. When p = 1, this is\nequivalent to using manhattan_distance (l1), and euclidean_distance\n(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used." @@ -194380,7 +193561,7 @@ "qname": "sklearn.neighbors._classification.KNeighborsClassifier.__init__.metric", "default_value": "'minkowski'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str or callable, default='minkowski'", "description": "The distance metric to use for the tree. The default metric is\nminkowski, and with p=2 is equivalent to the standard Euclidean\nmetric. For a list of available metrics, see the documentation of\n:class:`~sklearn.metrics.DistanceMetric` and the metrics listed in\n`sklearn.metrics.pairwise.PAIRWISE_DISTANCE_FUNCTIONS`. Note that the\n\"cosine\" metric uses :func:`~sklearn.metrics.pairwise.cosine_distances`.\nIf metric is \"precomputed\", X is assumed to be a distance matrix and\nmust be square during fit. X may be a :term:`sparse graph`,\nin which case only \"nonzero\" elements may be considered neighbors." @@ -194405,7 +193586,7 @@ "qname": "sklearn.neighbors._classification.KNeighborsClassifier.__init__.metric_params", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "dict, default=None", "description": "Additional keyword arguments for the metric function." 
@@ -194421,7 +193602,7 @@ "qname": "sklearn.neighbors._classification.KNeighborsClassifier.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "The number of parallel jobs to run for neighbors search.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details.\nDoesn't affect :meth:`fit` method." @@ -194433,7 +193614,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -194474,7 +193655,7 @@ "qname": "sklearn.neighbors._classification.KNeighborsClassifier.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -194487,7 +193668,7 @@ "qname": "sklearn.neighbors._classification.KNeighborsClassifier.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_samples) if metric='precomputed'", "description": "Training data." @@ -194512,7 +193693,7 @@ "qname": "sklearn.neighbors._classification.KNeighborsClassifier.fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples,) or (n_samples, n_outputs)", "description": "Target values." @@ -194533,7 +193714,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the k-nearest neighbors classifier from the training dataset.", "docstring": "Fit the k-nearest neighbors classifier from the training dataset.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_samples) if metric='precomputed'\n Training data.\n\n y : {array-like, sparse matrix} of shape (n_samples,) or (n_samples, n_outputs)\n Target values.\n\n Returns\n -------\n self : KNeighborsClassifier\n The fitted k-nearest neighbors classifier.\n " }, @@ -194549,7 +193730,7 @@ "qname": "sklearn.neighbors._classification.KNeighborsClassifier.predict.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -194562,7 +193743,7 @@ "qname": "sklearn.neighbors._classification.KNeighborsClassifier.predict.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_queries, n_features), or (n_queries, n_indexed) if metric == 'precomputed'", "description": "Test samples." 
@@ -194583,7 +193764,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict the class labels for the provided data.", "docstring": "Predict the class labels for the provided data.\n\n Parameters\n ----------\n X : array-like of shape (n_queries, n_features), or (n_queries, n_indexed) if metric == 'precomputed'\n Test samples.\n\n Returns\n -------\n y : ndarray of shape (n_queries,) or (n_queries, n_outputs)\n Class labels for each data sample.\n " }, @@ -194599,7 +193780,7 @@ "qname": "sklearn.neighbors._classification.KNeighborsClassifier.predict_proba.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -194612,7 +193793,7 @@ "qname": "sklearn.neighbors._classification.KNeighborsClassifier.predict_proba.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_queries, n_features), or (n_queries, n_indexed) if metric == 'precomputed'", "description": "Test samples." @@ -194633,7 +193814,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Return probability estimates for the test data X.", "docstring": "Return probability estimates for the test data X.\n\n Parameters\n ----------\n X : array-like of shape (n_queries, n_features), or (n_queries, n_indexed) if metric == 'precomputed'\n Test samples.\n\n Returns\n -------\n p : ndarray of shape (n_queries, n_classes), or a list of n_outputs of such arrays if n_outputs > 1.\n The class probabilities of the input samples. Classes are ordered\n by lexicographic order.\n " }, @@ -194649,7 +193830,7 @@ "qname": "sklearn.neighbors._classification.RadiusNeighborsClassifier.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -194662,7 +193843,7 @@ "qname": "sklearn.neighbors._classification.RadiusNeighborsClassifier.__init__.radius", "default_value": "1.0", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1.0", "description": "Range of parameter space to use by default for :meth:`radius_neighbors`\nqueries." @@ -194678,7 +193859,7 @@ "qname": "sklearn.neighbors._classification.RadiusNeighborsClassifier.__init__.weights", "default_value": "'uniform'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'uniform', 'distance'} or callable, default='uniform'", "description": "Weight function used in prediction. Possible values:\n\n- 'uniform' : uniform weights. All points in each neighborhood\n are weighted equally.\n- 'distance' : weight points by the inverse of their distance.\n in this case, closer neighbors of a query point will have a\n greater influence than neighbors which are further away.\n- [callable] : a user-defined function which accepts an\n array of distances, and returns an array of the same shape\n containing the weights.\n\nUniform weights are used by default." 
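The KNeighborsClassifier entries above cover the constructor (`n_neighbors`, `weights`, `algorithm`, `metric`, ...) plus `fit`, `predict`, and `predict_proba`. A minimal sketch of that public surface; the toy dataset is a placeholder, not part of the annotation file:

# Sketch of the KNeighborsClassifier API documented above.
import numpy as np
from sklearn.neighbors import KNeighborsClassifier

X = np.array([[0.0], [0.5], [1.0], [3.0], [3.5], [4.0]])
y = np.array([0, 0, 0, 1, 1, 1])

clf = KNeighborsClassifier(n_neighbors=3, weights="distance", algorithm="auto")
clf.fit(X, y)
print(clf.predict([[0.8], [3.2]]))        # class label per query point
print(clf.predict_proba([[0.8], [3.2]]))  # probabilities, lexicographic class order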
@@ -194688,7 +193869,7 @@ "types": [ { "kind": "EnumType", - "values": ["distance", "uniform"] + "values": ["uniform", "distance"] }, { "kind": "NamedType", @@ -194703,14 +193884,14 @@ "qname": "sklearn.neighbors._classification.RadiusNeighborsClassifier.__init__.algorithm", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'", "description": "Algorithm used to compute the nearest neighbors:\n\n- 'ball_tree' will use :class:`BallTree`\n- 'kd_tree' will use :class:`KDTree`\n- 'brute' will use a brute-force search.\n- 'auto' will attempt to decide the most appropriate algorithm\n based on the values passed to :meth:`fit` method.\n\nNote: fitting on sparse input will override the setting of\nthis parameter, using brute force." }, "type": { "kind": "EnumType", - "values": ["auto", "kd_tree", "ball_tree", "brute"] + "values": ["auto", "kd_tree", "brute", "ball_tree"] } }, { @@ -194719,7 +193900,7 @@ "qname": "sklearn.neighbors._classification.RadiusNeighborsClassifier.__init__.leaf_size", "default_value": "30", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=30", "description": "Leaf size passed to BallTree or KDTree. This can affect the\nspeed of the construction and query, as well as the memory\nrequired to store the tree. The optimal value depends on the\nnature of the problem." @@ -194735,7 +193916,7 @@ "qname": "sklearn.neighbors._classification.RadiusNeighborsClassifier.__init__.p", "default_value": "2", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=2", "description": "Power parameter for the Minkowski metric. When p = 1, this is\nequivalent to using manhattan_distance (l1), and euclidean_distance\n(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used." @@ -194751,7 +193932,7 @@ "qname": "sklearn.neighbors._classification.RadiusNeighborsClassifier.__init__.metric", "default_value": "'minkowski'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str or callable, default='minkowski'", "description": "Distance metric to use for the tree. The default metric is\nminkowski, and with p=2 is equivalent to the standard Euclidean\nmetric. For a list of available metrics, see the documentation of\n:class:`~sklearn.metrics.DistanceMetric`.\nIf metric is \"precomputed\", X is assumed to be a distance matrix and\nmust be square during fit. X may be a :term:`sparse graph`,\nin which case only \"nonzero\" elements may be considered neighbors." @@ -194776,7 +193957,7 @@ "qname": "sklearn.neighbors._classification.RadiusNeighborsClassifier.__init__.outlier_label", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{manual label, 'most_frequent'}, default=None", "description": "Label for outlier samples (samples with no neighbors in given radius).\n\n- manual label: str or int label (should be the same type as y)\n or list of manual labels if multi-output is used.\n- 'most_frequent' : assign the most frequent label of y to outliers.\n- None : when any outlier is detected, ValueError will be raised." 
@@ -194792,7 +193973,7 @@ "qname": "sklearn.neighbors._classification.RadiusNeighborsClassifier.__init__.metric_params", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "dict, default=None", "description": "Additional keyword arguments for the metric function." @@ -194808,7 +193989,7 @@ "qname": "sklearn.neighbors._classification.RadiusNeighborsClassifier.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "The number of parallel jobs to run for neighbors search.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details." @@ -194820,7 +194001,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -194861,7 +194042,7 @@ "qname": "sklearn.neighbors._classification.RadiusNeighborsClassifier.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -194874,7 +194055,7 @@ "qname": "sklearn.neighbors._classification.RadiusNeighborsClassifier.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_samples) if metric='precomputed'", "description": "Training data." @@ -194899,7 +194080,7 @@ "qname": "sklearn.neighbors._classification.RadiusNeighborsClassifier.fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples,) or (n_samples, n_outputs)", "description": "Target values." @@ -194920,7 +194101,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the radius neighbors classifier from the training dataset.", "docstring": "Fit the radius neighbors classifier from the training dataset.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_samples) if metric='precomputed'\n Training data.\n\n y : {array-like, sparse matrix} of shape (n_samples,) or (n_samples, n_outputs)\n Target values.\n\n Returns\n -------\n self : RadiusNeighborsClassifier\n The fitted radius neighbors classifier.\n " }, @@ -194936,7 +194117,7 @@ "qname": "sklearn.neighbors._classification.RadiusNeighborsClassifier.predict.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -194949,7 +194130,7 @@ "qname": "sklearn.neighbors._classification.RadiusNeighborsClassifier.predict.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_queries, n_features), or (n_queries, n_indexed) if metric == 'precomputed'", "description": "Test samples." 
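The RadiusNeighborsClassifier entries above document `radius`, `weights`, and the `outlier_label` handling for queries with no neighbours inside the radius. A hedged sketch with made-up numbers:

# Sketch of RadiusNeighborsClassifier as documented above.
import numpy as np
from sklearn.neighbors import RadiusNeighborsClassifier

X = np.array([[0.0], [0.4], [0.8], [5.0], [5.4], [5.8]])
y = np.array([0, 0, 0, 1, 1, 1])

clf = RadiusNeighborsClassifier(radius=1.0, weights="uniform",
                                outlier_label="most_frequent")
clf.fit(X, y)
# The middle query has no neighbours within radius 1.0, so it receives the
# most frequent label of y instead of raising a ValueError.
print(clf.predict([[0.5], [2.5], [5.5]]))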
@@ -194970,7 +194151,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict the class labels for the provided data.", "docstring": "Predict the class labels for the provided data.\n\n Parameters\n ----------\n X : array-like of shape (n_queries, n_features), or (n_queries, n_indexed) if metric == 'precomputed'\n Test samples.\n\n Returns\n -------\n y : ndarray of shape (n_queries,) or (n_queries, n_outputs)\n Class labels for each data sample.\n " }, @@ -194986,7 +194167,7 @@ "qname": "sklearn.neighbors._classification.RadiusNeighborsClassifier.predict_proba.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -194999,7 +194180,7 @@ "qname": "sklearn.neighbors._classification.RadiusNeighborsClassifier.predict_proba.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_queries, n_features), or (n_queries, n_indexed) if metric == 'precomputed'", "description": "Test samples." @@ -195020,7 +194201,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Return probability estimates for the test data X.", "docstring": "Return probability estimates for the test data X.\n\n Parameters\n ----------\n X : array-like of shape (n_queries, n_features), or (n_queries, n_indexed) if metric == 'precomputed'\n Test samples.\n\n Returns\n -------\n p : ndarray of shape (n_queries, n_classes), or a list of n_outputs of such arrays if n_outputs > 1.\n The class probabilities of the input samples. Classes are ordered\n by lexicographic order.\n " }, @@ -195061,7 +194242,7 @@ "qname": "sklearn.neighbors._distance_metric.DistanceMetric.get_metric.cls", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -195074,7 +194255,7 @@ "qname": "sklearn.neighbors._distance_metric.DistanceMetric.get_metric.metric", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -195083,7 +194264,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -195099,7 +194280,7 @@ "qname": "sklearn.neighbors._graph.KNeighborsTransformer.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -195112,14 +194293,14 @@ "qname": "sklearn.neighbors._graph.KNeighborsTransformer.__init__.mode", "default_value": "'distance'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'distance', 'connectivity'}, default='distance'", "description": "Type of returned matrix: 'connectivity' will return the connectivity\nmatrix with ones and zeros, and 'distance' will return the distances\nbetween neighbors according to the given metric." 
}, "type": { "kind": "EnumType", - "values": ["distance", "connectivity"] + "values": ["connectivity", "distance"] } }, { @@ -195128,7 +194309,7 @@ "qname": "sklearn.neighbors._graph.KNeighborsTransformer.__init__.n_neighbors", "default_value": "5", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=5", "description": "Number of neighbors for each sample in the transformed sparse graph.\nFor compatibility reasons, as each sample is considered as its own\nneighbor, one extra neighbor will be computed when mode == 'distance'.\nIn this case, the sparse graph contains (n_neighbors + 1) neighbors." @@ -195144,14 +194325,14 @@ "qname": "sklearn.neighbors._graph.KNeighborsTransformer.__init__.algorithm", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'", "description": "Algorithm used to compute the nearest neighbors:\n\n- 'ball_tree' will use :class:`BallTree`\n- 'kd_tree' will use :class:`KDTree`\n- 'brute' will use a brute-force search.\n- 'auto' will attempt to decide the most appropriate algorithm\n based on the values passed to :meth:`fit` method.\n\nNote: fitting on sparse input will override the setting of\nthis parameter, using brute force." }, "type": { "kind": "EnumType", - "values": ["auto", "kd_tree", "ball_tree", "brute"] + "values": ["auto", "kd_tree", "brute", "ball_tree"] } }, { @@ -195160,7 +194341,7 @@ "qname": "sklearn.neighbors._graph.KNeighborsTransformer.__init__.leaf_size", "default_value": "30", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=30", "description": "Leaf size passed to BallTree or KDTree. This can affect the\nspeed of the construction and query, as well as the memory\nrequired to store the tree. The optimal value depends on the\nnature of the problem." @@ -195176,7 +194357,7 @@ "qname": "sklearn.neighbors._graph.KNeighborsTransformer.__init__.metric", "default_value": "'minkowski'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str or callable, default='minkowski'", "description": "Metric to use for distance computation. Any metric from scikit-learn\nor scipy.spatial.distance can be used.\n\nIf metric is a callable function, it is called on each\npair of instances (rows) and the resulting value recorded. The callable\nshould take two arrays as input and return one value indicating the\ndistance between them. This works for Scipy's metrics, but is less\nefficient than passing the metric name as a string.\n\nDistance matrices are not supported.\n\nValid values for metric are:\n\n- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',\n 'manhattan']\n\n- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',\n 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',\n 'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',\n 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',\n 'yule']\n\nSee the documentation for scipy.spatial.distance for details on these\nmetrics." @@ -195201,7 +194382,7 @@ "qname": "sklearn.neighbors._graph.KNeighborsTransformer.__init__.p", "default_value": "2", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=2", "description": "Parameter for the Minkowski metric from\nsklearn.metrics.pairwise.pairwise_distances. 
When p = 1, this is\nequivalent to using manhattan_distance (l1), and euclidean_distance\n(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used." @@ -195217,7 +194398,7 @@ "qname": "sklearn.neighbors._graph.KNeighborsTransformer.__init__.metric_params", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "dict, default=None", "description": "Additional keyword arguments for the metric function." @@ -195233,7 +194414,7 @@ "qname": "sklearn.neighbors._graph.KNeighborsTransformer.__init__.n_jobs", "default_value": "1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=1", "description": "The number of parallel jobs to run for neighbors search.\nIf ``-1``, then the number of jobs is set to the number of CPU cores." @@ -195245,7 +194426,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -195286,7 +194467,7 @@ "qname": "sklearn.neighbors._graph.KNeighborsTransformer.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -195299,7 +194480,7 @@ "qname": "sklearn.neighbors._graph.KNeighborsTransformer.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_samples) if metric='precomputed'", "description": "Training data." @@ -195324,7 +194505,7 @@ "qname": "sklearn.neighbors._graph.KNeighborsTransformer.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present for API consistency by convention." @@ -195336,7 +194517,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the k-nearest neighbors transformer from the training dataset.", "docstring": "Fit the k-nearest neighbors transformer from the training dataset.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_samples) if metric='precomputed'\n Training data.\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n self : KNeighborsTransformer\n The fitted k-nearest neighbors transformer.\n " }, @@ -195352,7 +194533,7 @@ "qname": "sklearn.neighbors._graph.KNeighborsTransformer.fit_transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -195365,7 +194546,7 @@ "qname": "sklearn.neighbors._graph.KNeighborsTransformer.fit_transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Training set." @@ -195381,7 +194562,7 @@ "qname": "sklearn.neighbors._graph.KNeighborsTransformer.fit_transform.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present for API consistency by convention." 
@@ -195393,7 +194574,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit to data, then transform it.\n\nFits transformer to X and y with optional parameters fit_params\nand returns a transformed version of X.", "docstring": "Fit to data, then transform it.\n\n Fits transformer to X and y with optional parameters fit_params\n and returns a transformed version of X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training set.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n Xt : sparse matrix of shape (n_samples, n_samples)\n Xt[i, j] is assigned the weight of edge that connects i to j.\n Only the neighbors have an explicit value.\n The diagonal is always explicit.\n The matrix is of CSR format.\n " }, @@ -195409,7 +194590,7 @@ "qname": "sklearn.neighbors._graph.KNeighborsTransformer.transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -195422,7 +194603,7 @@ "qname": "sklearn.neighbors._graph.KNeighborsTransformer.transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples_transform, n_features)", "description": "Sample data." @@ -195434,7 +194615,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Compute the (weighted) graph of Neighbors for points in X.", "docstring": "Compute the (weighted) graph of Neighbors for points in X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples_transform, n_features)\n Sample data.\n\n Returns\n -------\n Xt : sparse matrix of shape (n_samples_transform, n_samples_fit)\n Xt[i, j] is assigned the weight of edge that connects i to j.\n Only the neighbors have an explicit value.\n The diagonal is always explicit.\n The matrix is of CSR format.\n " }, @@ -195450,7 +194631,7 @@ "qname": "sklearn.neighbors._graph.RadiusNeighborsTransformer.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -195463,14 +194644,14 @@ "qname": "sklearn.neighbors._graph.RadiusNeighborsTransformer.__init__.mode", "default_value": "'distance'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'distance', 'connectivity'}, default='distance'", "description": "Type of returned matrix: 'connectivity' will return the connectivity\nmatrix with ones and zeros, and 'distance' will return the distances\nbetween neighbors according to the given metric." }, "type": { "kind": "EnumType", - "values": ["distance", "connectivity"] + "values": ["connectivity", "distance"] } }, { @@ -195479,7 +194660,7 @@ "qname": "sklearn.neighbors._graph.RadiusNeighborsTransformer.__init__.radius", "default_value": "1.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1.0", "description": "Radius of neighborhood in the transformed sparse graph." 
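For the `KNeighborsTransformer` parameters made public above, a small sketch of the documented `mode='distance'` behaviour (each sample counts as its own neighbor, so `n_neighbors + 1` entries are stored per row); the random toy data is an assumption.

```python
import numpy as np
from sklearn.neighbors import KNeighborsTransformer

rng = np.random.RandomState(0)
X = rng.rand(20, 3)

knt = KNeighborsTransformer(mode="distance", n_neighbors=5)
graph = knt.fit_transform(X)        # sparse CSR matrix of shape (n_samples, n_samples)
print(graph.shape)                  # (20, 20)
print(graph.nnz)                    # 20 * (5 + 1): the diagonal is stored explicitly
```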
@@ -195495,14 +194676,14 @@ "qname": "sklearn.neighbors._graph.RadiusNeighborsTransformer.__init__.algorithm", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'", "description": "Algorithm used to compute the nearest neighbors:\n\n- 'ball_tree' will use :class:`BallTree`\n- 'kd_tree' will use :class:`KDTree`\n- 'brute' will use a brute-force search.\n- 'auto' will attempt to decide the most appropriate algorithm\n based on the values passed to :meth:`fit` method.\n\nNote: fitting on sparse input will override the setting of\nthis parameter, using brute force." }, "type": { "kind": "EnumType", - "values": ["auto", "kd_tree", "ball_tree", "brute"] + "values": ["auto", "kd_tree", "brute", "ball_tree"] } }, { @@ -195511,7 +194692,7 @@ "qname": "sklearn.neighbors._graph.RadiusNeighborsTransformer.__init__.leaf_size", "default_value": "30", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=30", "description": "Leaf size passed to BallTree or KDTree. This can affect the\nspeed of the construction and query, as well as the memory\nrequired to store the tree. The optimal value depends on the\nnature of the problem." @@ -195527,7 +194708,7 @@ "qname": "sklearn.neighbors._graph.RadiusNeighborsTransformer.__init__.metric", "default_value": "'minkowski'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str or callable, default='minkowski'", "description": "Metric to use for distance computation. Any metric from scikit-learn\nor scipy.spatial.distance can be used.\n\nIf metric is a callable function, it is called on each\npair of instances (rows) and the resulting value recorded. The callable\nshould take two arrays as input and return one value indicating the\ndistance between them. This works for Scipy's metrics, but is less\nefficient than passing the metric name as a string.\n\nDistance matrices are not supported.\n\nValid values for metric are:\n\n- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',\n 'manhattan']\n\n- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',\n 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',\n 'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',\n 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',\n 'yule']\n\nSee the documentation for scipy.spatial.distance for details on these\nmetrics." @@ -195552,7 +194733,7 @@ "qname": "sklearn.neighbors._graph.RadiusNeighborsTransformer.__init__.p", "default_value": "2", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=2", "description": "Parameter for the Minkowski metric from\nsklearn.metrics.pairwise.pairwise_distances. When p = 1, this is\nequivalent to using manhattan_distance (l1), and euclidean_distance\n(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used." @@ -195568,7 +194749,7 @@ "qname": "sklearn.neighbors._graph.RadiusNeighborsTransformer.__init__.metric_params", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "dict, default=None", "description": "Additional keyword arguments for the metric function." 
@@ -195584,7 +194765,7 @@ "qname": "sklearn.neighbors._graph.RadiusNeighborsTransformer.__init__.n_jobs", "default_value": "1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=1", "description": "The number of parallel jobs to run for neighbors search.\nIf ``-1``, then the number of jobs is set to the number of CPU cores." @@ -195596,7 +194777,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -195637,7 +194818,7 @@ "qname": "sklearn.neighbors._graph.RadiusNeighborsTransformer.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -195650,7 +194831,7 @@ "qname": "sklearn.neighbors._graph.RadiusNeighborsTransformer.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_samples) if metric='precomputed'", "description": "Training data." @@ -195675,7 +194856,7 @@ "qname": "sklearn.neighbors._graph.RadiusNeighborsTransformer.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present for API consistency by convention." @@ -195687,7 +194868,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the radius neighbors transformer from the training dataset.", "docstring": "Fit the radius neighbors transformer from the training dataset.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_samples) if metric='precomputed'\n Training data.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n self : RadiusNeighborsTransformer\n The fitted radius neighbors transformer.\n " }, @@ -195703,7 +194884,7 @@ "qname": "sklearn.neighbors._graph.RadiusNeighborsTransformer.fit_transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -195716,7 +194897,7 @@ "qname": "sklearn.neighbors._graph.RadiusNeighborsTransformer.fit_transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Training set." @@ -195732,7 +194913,7 @@ "qname": "sklearn.neighbors._graph.RadiusNeighborsTransformer.fit_transform.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present for API consistency by convention." 
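The companion `RadiusNeighborsTransformer`, whose parameters are switched to public in the same way, differs only in that neighborhoods are defined by `radius` rather than a neighbor count; a minimal sketch with assumed `radius` and `mode` values and toy data:

```python
import numpy as np
from sklearn.neighbors import RadiusNeighborsTransformer

rng = np.random.RandomState(0)
X = rng.rand(20, 3)

rnt = RadiusNeighborsTransformer(mode="connectivity", radius=0.5)
graph = rnt.fit_transform(X)        # CSR matrix; ones mark neighbors within the radius
print(graph.shape, graph.nnz)       # stored entries vary with how many points fall inside the radius
```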
@@ -195744,7 +194925,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit to data, then transform it.\n\nFits transformer to X and y with optional parameters fit_params\nand returns a transformed version of X.", "docstring": "Fit to data, then transform it.\n\n Fits transformer to X and y with optional parameters fit_params\n and returns a transformed version of X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training set.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n Xt : sparse matrix of shape (n_samples, n_samples)\n Xt[i, j] is assigned the weight of edge that connects i to j.\n Only the neighbors have an explicit value.\n The diagonal is always explicit.\n The matrix is of CSR format.\n " }, @@ -195760,7 +194941,7 @@ "qname": "sklearn.neighbors._graph.RadiusNeighborsTransformer.transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -195773,7 +194954,7 @@ "qname": "sklearn.neighbors._graph.RadiusNeighborsTransformer.transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples_transform, n_features)", "description": "Sample data." @@ -195785,7 +194966,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Compute the (weighted) graph of Neighbors for points in X.", "docstring": "Compute the (weighted) graph of Neighbors for points in X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples_transform, n_features)\n Sample data.\n\n Returns\n -------\n Xt : sparse matrix of shape (n_samples_transform, n_samples_fit)\n Xt[i, j] is assigned the weight of edge that connects i to j.\n Only the neighbors have an explicit value.\n The diagonal is always explicit.\n The matrix is of CSR format.\n " }, @@ -196232,7 +195413,7 @@ "qname": "sklearn.neighbors._kde.KernelDensity.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -196245,23 +195426,14 @@ "qname": "sklearn.neighbors._kde.KernelDensity.__init__.bandwidth", "default_value": "1.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { - "type": "float or {\"scott\", \"silverman\"}, default=1.0", - "description": "The bandwidth of the kernel. If bandwidth is a float, it defines the\nbandwidth of the kernel. If bandwidth is a string, one of the estimation\nmethods is implemented." + "type": "float, default=1.0", + "description": "The bandwidth of the kernel." }, "type": { - "kind": "UnionType", - "types": [ - { - "kind": "EnumType", - "values": ["silverman", "scott"] - }, - { - "kind": "NamedType", - "name": "float" - } - ] + "kind": "NamedType", + "name": "float" } }, { @@ -196270,7 +195442,7 @@ "qname": "sklearn.neighbors._kde.KernelDensity.__init__.algorithm", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'kd_tree', 'ball_tree', 'auto'}, default='auto'", "description": "The tree algorithm to use." 
@@ -196286,14 +195458,14 @@ "qname": "sklearn.neighbors._kde.KernelDensity.__init__.kernel", "default_value": "'gaussian'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'gaussian', 'tophat', 'epanechnikov', 'exponential', 'linear', 'cosine'}, default='gaussian'", "description": "The kernel to use." }, "type": { "kind": "EnumType", - "values": ["cosine", "linear", "exponential", "epanechnikov", "tophat", "gaussian"] + "values": ["epanechnikov", "gaussian", "linear", "tophat", "exponential", "cosine"] } }, { @@ -196302,7 +195474,7 @@ "qname": "sklearn.neighbors._kde.KernelDensity.__init__.metric", "default_value": "'euclidean'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str, default='euclidean'", "description": "The distance metric to use. Note that not all metrics are\nvalid with all algorithms. Refer to the documentation of\n:class:`BallTree` and :class:`KDTree` for a description of\navailable algorithms. Note that the normalization of the density\noutput is correct only for the Euclidean distance metric. Default\nis 'euclidean'." @@ -196318,7 +195490,7 @@ "qname": "sklearn.neighbors._kde.KernelDensity.__init__.atol", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0", "description": "The desired absolute tolerance of the result. A larger tolerance will\ngenerally lead to faster execution." @@ -196334,7 +195506,7 @@ "qname": "sklearn.neighbors._kde.KernelDensity.__init__.rtol", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0", "description": "The desired relative tolerance of the result. A larger tolerance will\ngenerally lead to faster execution." @@ -196350,7 +195522,7 @@ "qname": "sklearn.neighbors._kde.KernelDensity.__init__.breadth_first", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "If true (default), use a breadth-first approach to the problem.\nOtherwise use a depth-first approach." @@ -196366,7 +195538,7 @@ "qname": "sklearn.neighbors._kde.KernelDensity.__init__.leaf_size", "default_value": "40", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=40", "description": "Specify the leaf size of the underlying tree. See :class:`BallTree`\nor :class:`KDTree` for details." @@ -196382,7 +195554,7 @@ "qname": "sklearn.neighbors._kde.KernelDensity.__init__.metric_params", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "dict, default=None", "description": "Additional parameters to be passed to the tree for use with the\nmetric. For more information, see the documentation of\n:class:`BallTree` or :class:`KDTree`." 
@@ -196394,7 +195566,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -196486,7 +195658,7 @@ "qname": "sklearn.neighbors._kde.KernelDensity.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -196499,7 +195671,7 @@ "qname": "sklearn.neighbors._kde.KernelDensity.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "List of n_features-dimensional data points. Each row\ncorresponds to a single data point." @@ -196515,7 +195687,7 @@ "qname": "sklearn.neighbors._kde.KernelDensity.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "None", "description": "Ignored. This parameter exists only for compatibility with\n:class:`~sklearn.pipeline.Pipeline`." @@ -196531,7 +195703,7 @@ "qname": "sklearn.neighbors._kde.KernelDensity.fit.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,), default=None", "description": "List of sample weights attached to the data X.\n\n.. versionadded:: 0.20" @@ -196543,7 +195715,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the Kernel Density model on the data.", "docstring": "Fit the Kernel Density model on the data.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n List of n_features-dimensional data points. Each row\n corresponds to a single data point.\n\n y : None\n Ignored. This parameter exists only for compatibility with\n :class:`~sklearn.pipeline.Pipeline`.\n\n sample_weight : array-like of shape (n_samples,), default=None\n List of sample weights attached to the data X.\n\n .. versionadded:: 0.20\n\n Returns\n -------\n self : object\n Returns the instance itself.\n " }, @@ -196559,7 +195731,7 @@ "qname": "sklearn.neighbors._kde.KernelDensity.sample.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -196572,7 +195744,7 @@ "qname": "sklearn.neighbors._kde.KernelDensity.sample.n_samples", "default_value": "1", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=1", "description": "Number of samples to generate." @@ -196588,7 +195760,7 @@ "qname": "sklearn.neighbors._kde.KernelDensity.sample.random_state", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Determines random number generation used to generate\nrandom samples. Pass an int for reproducible results\nacross multiple function calls.\nSee :term:`Glossary `." 
@@ -196613,7 +195785,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Generate random samples from the model.\n\nCurrently, this is implemented only for gaussian and tophat kernels.", "docstring": "Generate random samples from the model.\n\n Currently, this is implemented only for gaussian and tophat kernels.\n\n Parameters\n ----------\n n_samples : int, default=1\n Number of samples to generate.\n\n random_state : int, RandomState instance or None, default=None\n Determines random number generation used to generate\n random samples. Pass an int for reproducible results\n across multiple function calls.\n See :term:`Glossary `.\n\n Returns\n -------\n X : array-like of shape (n_samples, n_features)\n List of samples.\n " }, @@ -196629,7 +195801,7 @@ "qname": "sklearn.neighbors._kde.KernelDensity.score.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -196642,7 +195814,7 @@ "qname": "sklearn.neighbors._kde.KernelDensity.score.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "List of n_features-dimensional data points. Each row\ncorresponds to a single data point." @@ -196658,7 +195830,7 @@ "qname": "sklearn.neighbors._kde.KernelDensity.score.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "None", "description": "Ignored. This parameter exists only for compatibility with\n:class:`~sklearn.pipeline.Pipeline`." @@ -196670,7 +195842,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Compute the total log-likelihood under the model.", "docstring": "Compute the total log-likelihood under the model.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n List of n_features-dimensional data points. Each row\n corresponds to a single data point.\n\n y : None\n Ignored. This parameter exists only for compatibility with\n :class:`~sklearn.pipeline.Pipeline`.\n\n Returns\n -------\n logprob : float\n Total log-likelihood of the data in X. This is normalized to be a\n probability density, so the value will be low for high-dimensional\n data.\n " }, @@ -196686,7 +195858,7 @@ "qname": "sklearn.neighbors._kde.KernelDensity.score_samples.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -196699,7 +195871,7 @@ "qname": "sklearn.neighbors._kde.KernelDensity.score_samples.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "An array of points to query. Last dimension should match dimension\nof training data (n_features)." @@ -196711,7 +195883,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Compute the log-likelihood of each sample under the model.", "docstring": "Compute the log-likelihood of each sample under the model.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n An array of points to query. Last dimension should match dimension\n of training data (n_features).\n\n Returns\n -------\n density : ndarray of shape (n_samples,)\n Log-likelihood of each sample in `X`. 
These are normalized to be\n probability densities, so values will be low for high-dimensional\n data.\n " }, @@ -196727,7 +195899,7 @@ "qname": "sklearn.neighbors._lof.LocalOutlierFactor.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -196740,7 +195912,7 @@ "qname": "sklearn.neighbors._lof.LocalOutlierFactor.__init__.n_neighbors", "default_value": "20", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=20", "description": "Number of neighbors to use by default for :meth:`kneighbors` queries.\nIf n_neighbors is larger than the number of samples provided,\nall samples will be used." @@ -196756,14 +195928,14 @@ "qname": "sklearn.neighbors._lof.LocalOutlierFactor.__init__.algorithm", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'", "description": "Algorithm used to compute the nearest neighbors:\n\n- 'ball_tree' will use :class:`BallTree`\n- 'kd_tree' will use :class:`KDTree`\n- 'brute' will use a brute-force search.\n- 'auto' will attempt to decide the most appropriate algorithm\n based on the values passed to :meth:`fit` method.\n\nNote: fitting on sparse input will override the setting of\nthis parameter, using brute force." }, "type": { "kind": "EnumType", - "values": ["auto", "kd_tree", "ball_tree", "brute"] + "values": ["auto", "kd_tree", "brute", "ball_tree"] } }, { @@ -196772,7 +195944,7 @@ "qname": "sklearn.neighbors._lof.LocalOutlierFactor.__init__.leaf_size", "default_value": "30", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=30", "description": "Leaf is size passed to :class:`BallTree` or :class:`KDTree`. This can\naffect the speed of the construction and query, as well as the memory\nrequired to store the tree. The optimal value depends on the\nnature of the problem." @@ -196788,7 +195960,7 @@ "qname": "sklearn.neighbors._lof.LocalOutlierFactor.__init__.metric", "default_value": "'minkowski'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str or callable, default='minkowski'", "description": "The metric is used for distance computation. Any metric from scikit-learn\nor scipy.spatial.distance can be used.\n\nIf metric is \"precomputed\", X is assumed to be a distance matrix and\nmust be square. X may be a sparse matrix, in which case only \"nonzero\"\nelements may be considered neighbors.\n\nIf metric is a callable function, it is called on each\npair of instances (rows) and the resulting value recorded. The callable\nshould take two arrays as input and return one value indicating the\ndistance between them. This works for Scipy's metrics, but is less\nefficient than passing the metric name as a string.\n\nValid values for metric are:\n\n- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',\n 'manhattan']\n\n- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',\n 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',\n 'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',\n 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',\n 'yule']\n\nSee the documentation for scipy.spatial.distance for details on these\nmetrics:\nhttps://docs.scipy.org/doc/scipy/reference/spatial.distance.html." 
@@ -196813,7 +195985,7 @@ "qname": "sklearn.neighbors._lof.LocalOutlierFactor.__init__.p", "default_value": "2", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=2", "description": "Parameter for the Minkowski metric from\n:func:`sklearn.metrics.pairwise.pairwise_distances`. When p = 1, this\nis equivalent to using manhattan_distance (l1), and euclidean_distance\n(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used." @@ -196829,7 +196001,7 @@ "qname": "sklearn.neighbors._lof.LocalOutlierFactor.__init__.metric_params", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "dict, default=None", "description": "Additional keyword arguments for the metric function." @@ -196845,7 +196017,7 @@ "qname": "sklearn.neighbors._lof.LocalOutlierFactor.__init__.contamination", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "'auto' or float, default='auto'", "description": "The amount of contamination of the data set, i.e. the proportion\nof outliers in the data set. When fitting this is used to define the\nthreshold on the scores of the samples.\n\n- if 'auto', the threshold is determined as in the\n original paper,\n- if a float, the contamination should be in the range (0, 0.5].\n\n.. versionchanged:: 0.22\n The default value of ``contamination`` changed from 0.1\n to ``'auto'``." @@ -196878,7 +196050,7 @@ "qname": "sklearn.neighbors._lof.LocalOutlierFactor.__init__.novelty", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "By default, LocalOutlierFactor is only meant to be used for outlier\ndetection (novelty=False). Set novelty to True if you want to use\nLocalOutlierFactor for novelty detection. In this case be aware that\nyou should only use predict, decision_function and score_samples\non new unseen data and not on the training set; and note that the\nresults obtained this way may differ from the standard LOF results.\n\n.. versionadded:: 0.20" @@ -196894,7 +196066,7 @@ "qname": "sklearn.neighbors._lof.LocalOutlierFactor.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "The number of parallel jobs to run for neighbors search.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details." @@ -196906,7 +196078,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -197120,7 +196292,7 @@ "qname": "sklearn.neighbors._lof.LocalOutlierFactor.decision_function.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -197133,7 +196305,7 @@ "qname": "sklearn.neighbors._lof.LocalOutlierFactor.decision_function.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "The query sample or samples to compute the Local Outlier Factor\nw.r.t. the training samples." 
@@ -197145,7 +196317,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Shifted opposite of the Local Outlier Factor of X.\n\nBigger is better, i.e. large values correspond to inliers.\n\n**Only available for novelty detection (when novelty is set to True).**\nThe shift offset allows a zero threshold for being an outlier.\nThe argument X is supposed to contain *new data*: if X contains a\npoint from training, it considers the later in its own neighborhood.\nAlso, the samples in X are not considered in the neighborhood of any\npoint.", "docstring": "Shifted opposite of the Local Outlier Factor of X.\n\n Bigger is better, i.e. large values correspond to inliers.\n\n **Only available for novelty detection (when novelty is set to True).**\n The shift offset allows a zero threshold for being an outlier.\n The argument X is supposed to contain *new data*: if X contains a\n point from training, it considers the later in its own neighborhood.\n Also, the samples in X are not considered in the neighborhood of any\n point.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The query sample or samples to compute the Local Outlier Factor\n w.r.t. the training samples.\n\n Returns\n -------\n shifted_opposite_lof_scores : ndarray of shape (n_samples,)\n The shifted opposite of the Local Outlier Factor of each input\n samples. The lower, the more abnormal. Negative scores represent\n outliers, positive scores represent inliers.\n " }, @@ -197161,7 +196333,7 @@ "qname": "sklearn.neighbors._lof.LocalOutlierFactor.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -197174,7 +196346,7 @@ "qname": "sklearn.neighbors._lof.LocalOutlierFactor.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_samples) if metric='precomputed'", "description": "Training data." @@ -197199,7 +196371,7 @@ "qname": "sklearn.neighbors._lof.LocalOutlierFactor.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present for API consistency by convention." 
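To make the two `LocalOutlierFactor` modes documented in these hunks concrete (outlier detection with the default `novelty=False` via `fit_predict`, novelty detection with `novelty=True` via `predict`/`decision_function` on unseen data), a short sketch; the toy data and the appended outlier point are assumptions:

```python
import numpy as np
from sklearn.neighbors import LocalOutlierFactor

rng = np.random.RandomState(0)
X = np.vstack([rng.normal(size=(100, 2)), [[8.0, 8.0]]])   # one obvious outlier appended

# Outlier detection (default novelty=False): label the training set itself.
lof = LocalOutlierFactor(n_neighbors=20, contamination="auto")
labels = lof.fit_predict(X)                    # -1 for outliers, 1 for inliers
print(labels[-1], lof.negative_outlier_factor_[-1])

# Novelty detection: fit on clean data, then score *new* observations only.
nov = LocalOutlierFactor(n_neighbors=20, novelty=True).fit(X[:100])
print(nov.predict([[8.0, 8.0]]), nov.decision_function([[8.0, 8.0]]))
```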
@@ -197211,7 +196383,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the local outlier factor detector from the training dataset.", "docstring": "Fit the local outlier factor detector from the training dataset.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_samples) if metric='precomputed'\n Training data.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n self : LocalOutlierFactor\n The fitted local outlier factor detector.\n " }, @@ -197227,7 +196399,7 @@ "qname": "sklearn.neighbors._lof.LocalOutlierFactor.fit_predict.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -197240,7 +196412,7 @@ "qname": "sklearn.neighbors._lof.LocalOutlierFactor.fit_predict.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features), default=None", "description": "The query sample or samples to compute the Local Outlier Factor\nw.r.t. to the training samples." @@ -197256,7 +196428,7 @@ "qname": "sklearn.neighbors._lof.LocalOutlierFactor.fit_predict.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present for API consistency by convention." @@ -197268,7 +196440,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the model to the training set X and return the labels.\n\n**Not available for novelty detection (when novelty is set to True).**\nLabel is 1 for an inlier and -1 for an outlier according to the LOF\nscore and the contamination parameter.", "docstring": "Fit the model to the training set X and return the labels.\n\n **Not available for novelty detection (when novelty is set to True).**\n Label is 1 for an inlier and -1 for an outlier according to the LOF\n score and the contamination parameter.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features), default=None\n The query sample or samples to compute the Local Outlier Factor\n w.r.t. to the training samples.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n is_inlier : ndarray of shape (n_samples,)\n Returns -1 for anomalies/outliers and 1 for inliers.\n " }, @@ -197284,7 +196456,7 @@ "qname": "sklearn.neighbors._lof.LocalOutlierFactor.predict.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -197297,7 +196469,7 @@ "qname": "sklearn.neighbors._lof.LocalOutlierFactor.predict.X", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "The query sample or samples to compute the Local Outlier Factor\nw.r.t. to the training samples." @@ -197309,7 +196481,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict the labels (1 inlier, -1 outlier) of X according to LOF.\n\n**Only available for novelty detection (when novelty is set to True).**\nThis method allows to generalize prediction to *new observations* (not\nin the training set). 
Note that the result of ``clf.fit(X)`` then\n``clf.predict(X)`` with ``novelty=True`` may differ from the result\nobtained by ``clf.fit_predict(X)`` with ``novelty=False``.", "docstring": "Predict the labels (1 inlier, -1 outlier) of X according to LOF.\n\n **Only available for novelty detection (when novelty is set to True).**\n This method allows to generalize prediction to *new observations* (not\n in the training set). Note that the result of ``clf.fit(X)`` then\n ``clf.predict(X)`` with ``novelty=True`` may differ from the result\n obtained by ``clf.fit_predict(X)`` with ``novelty=False``.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The query sample or samples to compute the Local Outlier Factor\n w.r.t. to the training samples.\n\n Returns\n -------\n is_inlier : ndarray of shape (n_samples,)\n Returns -1 for anomalies/outliers and +1 for inliers.\n " }, @@ -197325,7 +196497,7 @@ "qname": "sklearn.neighbors._lof.LocalOutlierFactor.score_samples.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -197338,7 +196510,7 @@ "qname": "sklearn.neighbors._lof.LocalOutlierFactor.score_samples.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "The query sample or samples to compute the Local Outlier Factor\nw.r.t. the training samples." @@ -197350,7 +196522,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Opposite of the Local Outlier Factor of X.\n\nIt is the opposite as bigger is better, i.e. large values correspond\nto inliers.\n\n**Only available for novelty detection (when novelty is set to True).**\nThe argument X is supposed to contain *new data*: if X contains a\npoint from training, it considers the later in its own neighborhood.\nAlso, the samples in X are not considered in the neighborhood of any\npoint. Because of this, the scores obtained via ``score_samples`` may\ndiffer from the standard LOF scores.\nThe standard LOF scores for the training data is available via the\n``negative_outlier_factor_`` attribute.", "docstring": "Opposite of the Local Outlier Factor of X.\n\n It is the opposite as bigger is better, i.e. large values correspond\n to inliers.\n\n **Only available for novelty detection (when novelty is set to True).**\n The argument X is supposed to contain *new data*: if X contains a\n point from training, it considers the later in its own neighborhood.\n Also, the samples in X are not considered in the neighborhood of any\n point. Because of this, the scores obtained via ``score_samples`` may\n differ from the standard LOF scores.\n The standard LOF scores for the training data is available via the\n ``negative_outlier_factor_`` attribute.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The query sample or samples to compute the Local Outlier Factor\n w.r.t. 
the training samples.\n\n Returns\n -------\n opposite_lof_scores : ndarray of shape (n_samples,)\n The opposite of the Local Outlier Factor of each input samples.\n The lower, the more abnormal.\n " }, @@ -197366,7 +196538,7 @@ "qname": "sklearn.neighbors._nca.NeighborhoodComponentsAnalysis.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -197379,7 +196551,7 @@ "qname": "sklearn.neighbors._nca.NeighborhoodComponentsAnalysis.__init__.n_components", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Preferred dimensionality of the projected space.\nIf None it will be set to `n_features`." @@ -197395,7 +196567,7 @@ "qname": "sklearn.neighbors._nca.NeighborhoodComponentsAnalysis.__init__.init", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'auto', 'pca', 'lda', 'identity', 'random'} or ndarray of shape (n_features_a, n_features_b), default='auto'", "description": "Initialization of the linear transformation. Possible options are\n`'auto'`, `'pca'`, `'lda'`, `'identity'`, `'random'`, and a numpy\narray of shape `(n_features_a, n_features_b)`.\n\n- `'auto'`\n Depending on `n_components`, the most reasonable initialization\n will be chosen. If `n_components <= n_classes` we use `'lda'`, as\n it uses labels information. If not, but\n `n_components < min(n_features, n_samples)`, we use `'pca'`, as\n it projects data in meaningful directions (those of higher\n variance). Otherwise, we just use `'identity'`.\n\n- `'pca'`\n `n_components` principal components of the inputs passed\n to :meth:`fit` will be used to initialize the transformation.\n (See :class:`~sklearn.decomposition.PCA`)\n\n- `'lda'`\n `min(n_components, n_classes)` most discriminative\n components of the inputs passed to :meth:`fit` will be used to\n initialize the transformation. (If `n_components > n_classes`,\n the rest of the components will be zero.) (See\n :class:`~sklearn.discriminant_analysis.LinearDiscriminantAnalysis`)\n\n- `'identity'`\n If `n_components` is strictly smaller than the\n dimensionality of the inputs passed to :meth:`fit`, the identity\n matrix will be truncated to the first `n_components` rows.\n\n- `'random'`\n The initial transformation will be a random array of shape\n `(n_components, n_features)`. Each value is sampled from the\n standard normal distribution.\n\n- numpy array\n `n_features_b` must match the dimensionality of the inputs passed\n to :meth:`fit` and n_features_a must be less than or equal to that.\n If `n_components` is not `None`, `n_features_a` must match it." @@ -197405,7 +196577,7 @@ "types": [ { "kind": "EnumType", - "values": ["auto", "identity", "random", "lda", "pca"] + "values": ["auto", "random", "pca", "lda", "identity"] }, { "kind": "NamedType", @@ -197420,7 +196592,7 @@ "qname": "sklearn.neighbors._nca.NeighborhoodComponentsAnalysis.__init__.warm_start", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "If `True` and :meth:`fit` has been called before, the solution of the\nprevious call to :meth:`fit` is used as the initial linear\ntransformation (`n_components` and `init` will be ignored)." 
@@ -197436,7 +196608,7 @@ "qname": "sklearn.neighbors._nca.NeighborhoodComponentsAnalysis.__init__.max_iter", "default_value": "50", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=50", "description": "Maximum number of iterations in the optimization." @@ -197452,7 +196624,7 @@ "qname": "sklearn.neighbors._nca.NeighborhoodComponentsAnalysis.__init__.tol", "default_value": "1e-05", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-5", "description": "Convergence tolerance for the optimization." @@ -197468,7 +196640,7 @@ "qname": "sklearn.neighbors._nca.NeighborhoodComponentsAnalysis.__init__.callback", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "callable, default=None", "description": "If not `None`, this function is called after every iteration of the\noptimizer, taking as arguments the current solution (flattened\ntransformation matrix) and the number of iterations. This might be\nuseful in case one wants to examine or store the transformation\nfound after each iteration." @@ -197484,7 +196656,7 @@ "qname": "sklearn.neighbors._nca.NeighborhoodComponentsAnalysis.__init__.verbose", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=0", "description": "If 0, no progress messages will be printed.\nIf 1, progress messages will be printed to stdout.\nIf > 1, progress messages will be printed and the `disp`\nparameter of :func:`scipy.optimize.minimize` will be set to\n`verbose - 2`." @@ -197500,7 +196672,7 @@ "qname": "sklearn.neighbors._nca.NeighborhoodComponentsAnalysis.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or numpy.RandomState, default=None", "description": "A pseudo random number generator object or a seed for it if int. If\n`init='random'`, `random_state` is used to initialize the random\ntransformation. If `init='pca'`, `random_state` is passed as an\nargument to PCA when initializing the transformation. Pass an int\nfor reproducible results across multiple function calls.\nSee :term:`Glossary `." @@ -197521,7 +196693,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -197828,7 +197000,7 @@ "qname": "sklearn.neighbors._nca.NeighborhoodComponentsAnalysis.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -197841,7 +197013,7 @@ "qname": "sklearn.neighbors._nca.NeighborhoodComponentsAnalysis.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "The training samples." @@ -197857,7 +197029,7 @@ "qname": "sklearn.neighbors._nca.NeighborhoodComponentsAnalysis.fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "The corresponding training labels." 
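For `NeighborhoodComponentsAnalysis`, a minimal fit/transform sketch matching the parameters made public above; the iris data and `n_components=2` are illustrative choices, and `transform` is covered by the next hunks.

```python
from sklearn.datasets import load_iris
from sklearn.neighbors import NeighborhoodComponentsAnalysis

X, y = load_iris(return_X_y=True)

# Learn a 2-D linear transformation that pulls same-class neighbors together.
nca = NeighborhoodComponentsAnalysis(n_components=2, random_state=0)
nca.fit(X, y)
print(nca.transform(X).shape)   # (n_samples, n_components)
```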
@@ -197869,7 +197041,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the model according to the given training data.", "docstring": "Fit the model according to the given training data.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The training samples.\n\n y : array-like of shape (n_samples,)\n The corresponding training labels.\n\n Returns\n -------\n self : object\n Fitted estimator.\n " }, @@ -197885,7 +197057,7 @@ "qname": "sklearn.neighbors._nca.NeighborhoodComponentsAnalysis.transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -197898,7 +197070,7 @@ "qname": "sklearn.neighbors._nca.NeighborhoodComponentsAnalysis.transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Data samples." @@ -197910,7 +197082,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Apply the learned transformation to the given data.", "docstring": "Apply the learned transformation to the given data.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Data samples.\n\n Returns\n -------\n X_embedded: ndarray of shape (n_samples, n_components)\n The data samples transformed.\n\n Raises\n ------\n NotFittedError\n If :meth:`fit` has not been called before.\n " }, @@ -197926,7 +197098,7 @@ "qname": "sklearn.neighbors._nearest_centroid.NearestCentroid.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -197939,7 +197111,7 @@ "qname": "sklearn.neighbors._nearest_centroid.NearestCentroid.__init__.metric", "default_value": "'euclidean'", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "str or callable, default=\"euclidean\"", "description": "The metric to use when calculating distance between instances in a\nfeature array. If metric is a string or callable, it must be one of\nthe options allowed by\n:func:`~sklearn.metrics.pairwise_distances` for its metric\nparameter. The centroids for the samples corresponding to each class is\nthe point from which the sum of the distances (according to the metric)\nof all samples that belong to that particular class are minimized.\nIf the `\"manhattan\"` metric is provided, this centroid is the median\nand for all other metrics, the centroid is now set to be the mean.\n\n.. versionchanged:: 0.19\n `metric='precomputed'` was deprecated and now raises an error" @@ -197964,7 +197136,7 @@ "qname": "sklearn.neighbors._nearest_centroid.NearestCentroid.__init__.shrink_threshold", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=None", "description": "Threshold for shrinking centroids to remove features." 
@@ -197976,7 +197148,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -197992,7 +197164,7 @@ "qname": "sklearn.neighbors._nearest_centroid.NearestCentroid.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -198005,7 +197177,7 @@ "qname": "sklearn.neighbors._nearest_centroid.NearestCentroid.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Training vector, where `n_samples` is the number of samples and\n`n_features` is the number of features.\nNote that centroid shrinking cannot be used with sparse matrices." @@ -198030,7 +197202,7 @@ "qname": "sklearn.neighbors._nearest_centroid.NearestCentroid.fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "Target values." @@ -198042,7 +197214,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the NearestCentroid model according to the given training data.", "docstring": "\n Fit the NearestCentroid model according to the given training data.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training vector, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n Note that centroid shrinking cannot be used with sparse matrices.\n y : array-like of shape (n_samples,)\n Target values.\n\n Returns\n -------\n self : object\n Fitted estimator.\n " }, @@ -198058,7 +197230,7 @@ "qname": "sklearn.neighbors._nearest_centroid.NearestCentroid.predict.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -198071,7 +197243,7 @@ "qname": "sklearn.neighbors._nearest_centroid.NearestCentroid.predict.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Test samples." 
@@ -198092,7 +197264,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Perform classification on an array of test vectors `X`.\n\nThe predicted class `C` for each sample in `X` is returned.", "docstring": "Perform classification on an array of test vectors `X`.\n\n The predicted class `C` for each sample in `X` is returned.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Test samples.\n\n Returns\n -------\n C : ndarray of shape (n_samples,)\n The predicted classes.\n\n Notes\n -----\n If the metric constructor parameter is `\"precomputed\"`, `X` is assumed\n to be the distance matrix between the data to be predicted and\n `self.centroids_`.\n " }, @@ -198108,7 +197280,7 @@ "qname": "sklearn.neighbors._regression.KNeighborsRegressor.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -198121,7 +197293,7 @@ "qname": "sklearn.neighbors._regression.KNeighborsRegressor.__init__.n_neighbors", "default_value": "5", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=5", "description": "Number of neighbors to use by default for :meth:`kneighbors` queries." @@ -198137,7 +197309,7 @@ "qname": "sklearn.neighbors._regression.KNeighborsRegressor.__init__.weights", "default_value": "'uniform'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'uniform', 'distance'} or callable, default='uniform'", "description": "Weight function used in prediction. Possible values:\n\n- 'uniform' : uniform weights. All points in each neighborhood\n are weighted equally.\n- 'distance' : weight points by the inverse of their distance.\n in this case, closer neighbors of a query point will have a\n greater influence than neighbors which are further away.\n- [callable] : a user-defined function which accepts an\n array of distances, and returns an array of the same shape\n containing the weights.\n\nUniform weights are used by default." @@ -198147,7 +197319,7 @@ "types": [ { "kind": "EnumType", - "values": ["distance", "uniform"] + "values": ["uniform", "distance"] }, { "kind": "NamedType", @@ -198162,14 +197334,14 @@ "qname": "sklearn.neighbors._regression.KNeighborsRegressor.__init__.algorithm", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'", "description": "Algorithm used to compute the nearest neighbors:\n\n- 'ball_tree' will use :class:`BallTree`\n- 'kd_tree' will use :class:`KDTree`\n- 'brute' will use a brute-force search.\n- 'auto' will attempt to decide the most appropriate algorithm\n based on the values passed to :meth:`fit` method.\n\nNote: fitting on sparse input will override the setting of\nthis parameter, using brute force." }, "type": { "kind": "EnumType", - "values": ["auto", "kd_tree", "ball_tree", "brute"] + "values": ["auto", "kd_tree", "brute", "ball_tree"] } }, { @@ -198178,7 +197350,7 @@ "qname": "sklearn.neighbors._regression.KNeighborsRegressor.__init__.leaf_size", "default_value": "30", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=30", "description": "Leaf size passed to BallTree or KDTree. This can affect the\nspeed of the construction and query, as well as the memory\nrequired to store the tree. 
The optimal value depends on the\nnature of the problem." @@ -198194,7 +197366,7 @@ "qname": "sklearn.neighbors._regression.KNeighborsRegressor.__init__.p", "default_value": "2", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=2", "description": "Power parameter for the Minkowski metric. When p = 1, this is\nequivalent to using manhattan_distance (l1), and euclidean_distance\n(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used." @@ -198210,7 +197382,7 @@ "qname": "sklearn.neighbors._regression.KNeighborsRegressor.__init__.metric", "default_value": "'minkowski'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str or callable, default='minkowski'", "description": "The distance metric to use for the tree. The default metric is\nminkowski, and with p=2 is equivalent to the standard Euclidean\nmetric. For a list of available metrics, see the documentation of\n:class:`~sklearn.metrics.DistanceMetric` and the metrics listed in\n`sklearn.metrics.pairwise.PAIRWISE_DISTANCE_FUNCTIONS`. Note that the\n\"cosine\" metric uses :func:`~sklearn.metrics.pairwise.cosine_distances`.\nIf metric is \"precomputed\", X is assumed to be a distance matrix and\nmust be square during fit. X may be a :term:`sparse graph`,\nin which case only \"nonzero\" elements may be considered neighbors." @@ -198235,7 +197407,7 @@ "qname": "sklearn.neighbors._regression.KNeighborsRegressor.__init__.metric_params", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "dict, default=None", "description": "Additional keyword arguments for the metric function." @@ -198251,7 +197423,7 @@ "qname": "sklearn.neighbors._regression.KNeighborsRegressor.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "The number of parallel jobs to run for neighbors search.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details.\nDoesn't affect :meth:`fit` method." @@ -198263,7 +197435,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -198304,7 +197476,7 @@ "qname": "sklearn.neighbors._regression.KNeighborsRegressor.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -198317,7 +197489,7 @@ "qname": "sklearn.neighbors._regression.KNeighborsRegressor.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_samples) if metric='precomputed'", "description": "Training data." @@ -198342,7 +197514,7 @@ "qname": "sklearn.neighbors._regression.KNeighborsRegressor.fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples,) or (n_samples, n_outputs)", "description": "Target values." 
@@ -198363,7 +197535,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the k-nearest neighbors regressor from the training dataset.", "docstring": "Fit the k-nearest neighbors regressor from the training dataset.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_samples) if metric='precomputed'\n Training data.\n\n y : {array-like, sparse matrix} of shape (n_samples,) or (n_samples, n_outputs)\n Target values.\n\n Returns\n -------\n self : KNeighborsRegressor\n The fitted k-nearest neighbors regressor.\n " }, @@ -198379,7 +197551,7 @@ "qname": "sklearn.neighbors._regression.KNeighborsRegressor.predict.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -198392,7 +197564,7 @@ "qname": "sklearn.neighbors._regression.KNeighborsRegressor.predict.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_queries, n_features), or (n_queries, n_indexed) if metric == 'precomputed'", "description": "Test samples." @@ -198413,7 +197585,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict the target for the provided data.", "docstring": "Predict the target for the provided data.\n\n Parameters\n ----------\n X : array-like of shape (n_queries, n_features), or (n_queries, n_indexed) if metric == 'precomputed'\n Test samples.\n\n Returns\n -------\n y : ndarray of shape (n_queries,) or (n_queries, n_outputs), dtype=int\n Target values.\n " }, @@ -198429,7 +197601,7 @@ "qname": "sklearn.neighbors._regression.RadiusNeighborsRegressor.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -198442,7 +197614,7 @@ "qname": "sklearn.neighbors._regression.RadiusNeighborsRegressor.__init__.radius", "default_value": "1.0", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1.0", "description": "Range of parameter space to use by default for :meth:`radius_neighbors`\nqueries." @@ -198458,7 +197630,7 @@ "qname": "sklearn.neighbors._regression.RadiusNeighborsRegressor.__init__.weights", "default_value": "'uniform'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'uniform', 'distance'} or callable, default='uniform'", "description": "Weight function used in prediction. Possible values:\n\n- 'uniform' : uniform weights. All points in each neighborhood\n are weighted equally.\n- 'distance' : weight points by the inverse of their distance.\n in this case, closer neighbors of a query point will have a\n greater influence than neighbors which are further away.\n- [callable] : a user-defined function which accepts an\n array of distances, and returns an array of the same shape\n containing the weights.\n\nUniform weights are used by default." 
@@ -198468,7 +197640,7 @@ "types": [ { "kind": "EnumType", - "values": ["distance", "uniform"] + "values": ["uniform", "distance"] }, { "kind": "NamedType", @@ -198483,14 +197655,14 @@ "qname": "sklearn.neighbors._regression.RadiusNeighborsRegressor.__init__.algorithm", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'", "description": "Algorithm used to compute the nearest neighbors:\n\n- 'ball_tree' will use :class:`BallTree`\n- 'kd_tree' will use :class:`KDTree`\n- 'brute' will use a brute-force search.\n- 'auto' will attempt to decide the most appropriate algorithm\n based on the values passed to :meth:`fit` method.\n\nNote: fitting on sparse input will override the setting of\nthis parameter, using brute force." }, "type": { "kind": "EnumType", - "values": ["auto", "kd_tree", "ball_tree", "brute"] + "values": ["auto", "kd_tree", "brute", "ball_tree"] } }, { @@ -198499,7 +197671,7 @@ "qname": "sklearn.neighbors._regression.RadiusNeighborsRegressor.__init__.leaf_size", "default_value": "30", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=30", "description": "Leaf size passed to BallTree or KDTree. This can affect the\nspeed of the construction and query, as well as the memory\nrequired to store the tree. The optimal value depends on the\nnature of the problem." @@ -198515,7 +197687,7 @@ "qname": "sklearn.neighbors._regression.RadiusNeighborsRegressor.__init__.p", "default_value": "2", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=2", "description": "Power parameter for the Minkowski metric. When p = 1, this is\nequivalent to using manhattan_distance (l1), and euclidean_distance\n(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used." @@ -198531,7 +197703,7 @@ "qname": "sklearn.neighbors._regression.RadiusNeighborsRegressor.__init__.metric", "default_value": "'minkowski'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str or callable, default='minkowski'", "description": "The distance metric to use for the tree. The default metric is\nminkowski, and with p=2 is equivalent to the standard Euclidean\nmetric. See the documentation of :class:`DistanceMetric` for a\nlist of available metrics.\nIf metric is \"precomputed\", X is assumed to be a distance matrix and\nmust be square during fit. X may be a :term:`sparse graph`,\nin which case only \"nonzero\" elements may be considered neighbors." @@ -198556,7 +197728,7 @@ "qname": "sklearn.neighbors._regression.RadiusNeighborsRegressor.__init__.metric_params", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "dict, default=None", "description": "Additional keyword arguments for the metric function." @@ -198572,7 +197744,7 @@ "qname": "sklearn.neighbors._regression.RadiusNeighborsRegressor.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "The number of parallel jobs to run for neighbors search.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details." 
@@ -198584,7 +197756,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -198600,7 +197772,7 @@ "qname": "sklearn.neighbors._regression.RadiusNeighborsRegressor.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -198613,7 +197785,7 @@ "qname": "sklearn.neighbors._regression.RadiusNeighborsRegressor.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_samples) if metric='precomputed'", "description": "Training data." @@ -198638,7 +197810,7 @@ "qname": "sklearn.neighbors._regression.RadiusNeighborsRegressor.fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples,) or (n_samples, n_outputs)", "description": "Target values." @@ -198659,7 +197831,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the radius neighbors regressor from the training dataset.", "docstring": "Fit the radius neighbors regressor from the training dataset.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_samples) if metric='precomputed'\n Training data.\n\n y : {array-like, sparse matrix} of shape (n_samples,) or (n_samples, n_outputs)\n Target values.\n\n Returns\n -------\n self : RadiusNeighborsRegressor\n The fitted radius neighbors regressor.\n " }, @@ -198675,7 +197847,7 @@ "qname": "sklearn.neighbors._regression.RadiusNeighborsRegressor.predict.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -198688,7 +197860,7 @@ "qname": "sklearn.neighbors._regression.RadiusNeighborsRegressor.predict.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_queries, n_features), or (n_queries, n_indexed) if metric == 'precomputed'", "description": "Test samples." @@ -198709,7 +197881,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict the target for the provided data.", "docstring": "Predict the target for the provided data.\n\n Parameters\n ----------\n X : array-like of shape (n_queries, n_features), or (n_queries, n_indexed) if metric == 'precomputed'\n Test samples.\n\n Returns\n -------\n y : ndarray of shape (n_queries,) or (n_queries, n_outputs), dtype=double\n Target values.\n " }, @@ -198725,7 +197897,7 @@ "qname": "sklearn.neighbors._unsupervised.NearestNeighbors.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -198738,7 +197910,7 @@ "qname": "sklearn.neighbors._unsupervised.NearestNeighbors.__init__.n_neighbors", "default_value": "5", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=5", "description": "Number of neighbors to use by default for :meth:`kneighbors` queries." 
@@ -198754,7 +197926,7 @@ "qname": "sklearn.neighbors._unsupervised.NearestNeighbors.__init__.radius", "default_value": "1.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1.0", "description": "Range of parameter space to use by default for :meth:`radius_neighbors`\nqueries." @@ -198770,14 +197942,14 @@ "qname": "sklearn.neighbors._unsupervised.NearestNeighbors.__init__.algorithm", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'", "description": "Algorithm used to compute the nearest neighbors:\n\n- 'ball_tree' will use :class:`BallTree`\n- 'kd_tree' will use :class:`KDTree`\n- 'brute' will use a brute-force search.\n- 'auto' will attempt to decide the most appropriate algorithm\n based on the values passed to :meth:`fit` method.\n\nNote: fitting on sparse input will override the setting of\nthis parameter, using brute force." }, "type": { "kind": "EnumType", - "values": ["auto", "kd_tree", "ball_tree", "brute"] + "values": ["auto", "kd_tree", "brute", "ball_tree"] } }, { @@ -198786,7 +197958,7 @@ "qname": "sklearn.neighbors._unsupervised.NearestNeighbors.__init__.leaf_size", "default_value": "30", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=30", "description": "Leaf size passed to BallTree or KDTree. This can affect the\nspeed of the construction and query, as well as the memory\nrequired to store the tree. The optimal value depends on the\nnature of the problem." @@ -198802,7 +197974,7 @@ "qname": "sklearn.neighbors._unsupervised.NearestNeighbors.__init__.metric", "default_value": "'minkowski'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "str or callable, default='minkowski'", "description": "The distance metric to use for the tree. The default metric is\nminkowski, and with p=2 is equivalent to the standard Euclidean\nmetric. For a list of available metrics, see the documentation of\n:class:`~sklearn.metrics.DistanceMetric` and the metrics listed in\n`sklearn.metrics.pairwise.PAIRWISE_DISTANCE_FUNCTIONS`. Note that the\n\"cosine\" metric uses :func:`~sklearn.metrics.pairwise.cosine_distances`.\nIf metric is \"precomputed\", X is assumed to be a distance matrix and\nmust be square during fit. X may be a :term:`sparse graph`,\nin which case only \"nonzero\" elements may be considered neighbors." @@ -198827,7 +197999,7 @@ "qname": "sklearn.neighbors._unsupervised.NearestNeighbors.__init__.p", "default_value": "2", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=2", "description": "Parameter for the Minkowski metric from\nsklearn.metrics.pairwise.pairwise_distances. When p = 1, this is\nequivalent to using manhattan_distance (l1), and euclidean_distance\n(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used." @@ -198843,7 +198015,7 @@ "qname": "sklearn.neighbors._unsupervised.NearestNeighbors.__init__.metric_params", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "dict, default=None", "description": "Additional keyword arguments for the metric function." 
@@ -198859,7 +198031,7 @@ "qname": "sklearn.neighbors._unsupervised.NearestNeighbors.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "The number of parallel jobs to run for neighbors search.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details." @@ -198871,7 +198043,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -198887,7 +198059,7 @@ "qname": "sklearn.neighbors._unsupervised.NearestNeighbors.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -198900,7 +198072,7 @@ "qname": "sklearn.neighbors._unsupervised.NearestNeighbors.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_samples) if metric='precomputed'", "description": "Training data." @@ -198925,7 +198097,7 @@ "qname": "sklearn.neighbors._unsupervised.NearestNeighbors.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present for API consistency by convention." @@ -198937,7 +198109,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the nearest neighbors estimator from the training dataset.", "docstring": "Fit the nearest neighbors estimator from the training dataset.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_samples) if metric='precomputed'\n Training data.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n self : NearestNeighbors\n The fitted nearest neighbors estimator.\n " }, @@ -201219,7 +200391,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPClassifier.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -201232,7 +200404,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPClassifier.__init__.hidden_layer_sizes", "default_value": "(100, )", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "tuple, length = n_layers - 2, default=(100,)", "description": "The ith element represents the number of neurons in the ith\nhidden layer." 
@@ -201257,7 +200429,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPClassifier.__init__.activation", "default_value": "'relu'", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{'identity', 'logistic', 'tanh', 'relu'}, default='relu'", "description": "Activation function for the hidden layer.\n\n- 'identity', no-op activation, useful to implement linear bottleneck,\n returns f(x) = x\n\n- 'logistic', the logistic sigmoid function,\n returns f(x) = 1 / (1 + exp(-x)).\n\n- 'tanh', the hyperbolic tan function,\n returns f(x) = tanh(x).\n\n- 'relu', the rectified linear unit function,\n returns f(x) = max(0, x)" @@ -201273,7 +200445,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPClassifier.__init__.solver", "default_value": "'adam'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'lbfgs', 'sgd', 'adam'}, default='adam'", "description": "The solver for weight optimization.\n\n- 'lbfgs' is an optimizer in the family of quasi-Newton methods.\n\n- 'sgd' refers to stochastic gradient descent.\n\n- 'adam' refers to a stochastic gradient-based optimizer proposed\n by Kingma, Diederik, and Jimmy Ba\n\nNote: The default solver 'adam' works pretty well on relatively\nlarge datasets (with thousands of training samples or more) in terms of\nboth training time and validation score.\nFor small datasets, however, 'lbfgs' can converge faster and perform\nbetter." @@ -201289,7 +200461,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPClassifier.__init__.alpha", "default_value": "0.0001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.0001", "description": "Strength of the L2 regularization term. The L2 regularization term\nis divided by the sample size when added to the loss." @@ -201305,7 +200477,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPClassifier.__init__.batch_size", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default='auto'", "description": "Size of minibatches for stochastic optimizers.\nIf the solver is 'lbfgs', the classifier will not use minibatch.\nWhen set to \"auto\", `batch_size=min(200, n_samples)`." @@ -201321,14 +200493,14 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPClassifier.__init__.learning_rate", "default_value": "'constant'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'constant', 'invscaling', 'adaptive'}, default='constant'", "description": "Learning rate schedule for weight updates.\n\n- 'constant' is a constant learning rate given by\n 'learning_rate_init'.\n\n- 'invscaling' gradually decreases the learning rate at each\n time step 't' using an inverse scaling exponent of 'power_t'.\n effective_learning_rate = learning_rate_init / pow(t, power_t)\n\n- 'adaptive' keeps the learning rate constant to\n 'learning_rate_init' as long as training loss keeps decreasing.\n Each time two consecutive epochs fail to decrease training loss by at\n least tol, or fail to increase validation score by at least tol if\n 'early_stopping' is on, the current learning rate is divided by 5.\n\nOnly used when ``solver='sgd'``." 
}, "type": { "kind": "EnumType", - "values": ["adaptive", "invscaling", "constant"] + "values": ["invscaling", "adaptive", "constant"] } }, { @@ -201337,7 +200509,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPClassifier.__init__.learning_rate_init", "default_value": "0.001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.001", "description": "The initial learning rate used. It controls the step-size\nin updating the weights. Only used when solver='sgd' or 'adam'." @@ -201353,7 +200525,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPClassifier.__init__.power_t", "default_value": "0.5", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.5", "description": "The exponent for inverse scaling learning rate.\nIt is used in updating effective learning rate when the learning_rate\nis set to 'invscaling'. Only used when solver='sgd'." @@ -201369,7 +200541,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPClassifier.__init__.max_iter", "default_value": "200", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=200", "description": "Maximum number of iterations. The solver iterates until convergence\n(determined by 'tol') or this number of iterations. For stochastic\nsolvers ('sgd', 'adam'), note that this determines the number of epochs\n(how many times each data point will be used), not the number of\ngradient steps." @@ -201385,7 +200557,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPClassifier.__init__.shuffle", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether to shuffle samples in each iteration. Only used when\nsolver='sgd' or 'adam'." @@ -201401,7 +200573,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPClassifier.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance, default=None", "description": "Determines random number generation for weights and bias\ninitialization, train-test split if early stopping is used, and batch\nsampling when solver='sgd' or 'adam'.\nPass an int for reproducible results across multiple function calls.\nSee :term:`Glossary `." @@ -201426,7 +200598,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPClassifier.__init__.tol", "default_value": "0.0001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-4", "description": "Tolerance for the optimization. When the loss or score is not improving\nby at least ``tol`` for ``n_iter_no_change`` consecutive iterations,\nunless ``learning_rate`` is set to 'adaptive', convergence is\nconsidered to be reached and training stops." @@ -201442,7 +200614,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPClassifier.__init__.verbose", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Whether to print progress messages to stdout." 
@@ -201458,7 +200630,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPClassifier.__init__.warm_start", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "When set to True, reuse the solution of the previous\ncall to fit as initialization, otherwise, just erase the\nprevious solution. See :term:`the Glossary `." @@ -201474,7 +200646,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPClassifier.__init__.momentum", "default_value": "0.9", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.9", "description": "Momentum for gradient descent update. Should be between 0 and 1. Only\nused when solver='sgd'." @@ -201490,7 +200662,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPClassifier.__init__.nesterovs_momentum", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether to use Nesterov's momentum. Only used when solver='sgd' and\nmomentum > 0." @@ -201506,7 +200678,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPClassifier.__init__.early_stopping", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Whether to use early stopping to terminate training when validation\nscore is not improving. If set to true, it will automatically set\naside 10% of training data as validation and terminate training when\nvalidation score is not improving by at least tol for\n``n_iter_no_change`` consecutive epochs. The split is stratified,\nexcept in a multilabel setting.\nIf early stopping is False, then the training stops when the training\nloss does not improve by more than tol for n_iter_no_change consecutive\npasses over the training set.\nOnly effective when solver='sgd' or 'adam'." @@ -201522,7 +200694,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPClassifier.__init__.validation_fraction", "default_value": "0.1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.1", "description": "The proportion of training data to set aside as validation set for\nearly stopping. Must be between 0 and 1.\nOnly used if early_stopping is True." @@ -201538,7 +200710,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPClassifier.__init__.beta_1", "default_value": "0.9", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.9", "description": "Exponential decay rate for estimates of first moment vector in adam,\nshould be in [0, 1). Only used when solver='adam'." @@ -201554,7 +200726,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPClassifier.__init__.beta_2", "default_value": "0.999", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.999", "description": "Exponential decay rate for estimates of second moment vector in adam,\nshould be in [0, 1). Only used when solver='adam'." 
@@ -201570,7 +200742,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPClassifier.__init__.epsilon", "default_value": "1e-08", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-8", "description": "Value for numerical stability in adam. Only used when solver='adam'." @@ -201586,7 +200758,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPClassifier.__init__.n_iter_no_change", "default_value": "10", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=10", "description": "Maximum number of epochs to not meet ``tol`` improvement.\nOnly effective when solver='sgd' or 'adam'.\n\n.. versionadded:: 0.20" @@ -201602,7 +200774,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPClassifier.__init__.max_fun", "default_value": "15000", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=15000", "description": "Only used when solver='lbfgs'. Maximum number of loss function calls.\nThe solver iterates until convergence (determined by 'tol'), number\nof iterations reaches max_iter, or this number of loss function calls.\nNote that number of loss function calls will be greater than or equal\nto the number of iterations for the `MLPClassifier`.\n\n.. versionadded:: 0.22" @@ -201614,7 +200786,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -201732,7 +200904,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPClassifier.partial_fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -201745,7 +200917,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPClassifier.partial_fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The input data." @@ -201770,7 +200942,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPClassifier.partial_fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "The target values." @@ -201786,7 +200958,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPClassifier.partial_fit.classes", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array of shape (n_classes,), default=None", "description": "Classes across all calls to partial_fit.\nCan be obtained via `np.unique(y_all)`, where y_all is the\ntarget vector of the entire dataset.\nThis argument is required for the first call to partial_fit\nand can be omitted in the subsequent calls.\nNote that y doesn't need to contain all labels in `classes`." 
@@ -201798,7 +200970,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Update the model with a single iteration over the given data.", "docstring": "Update the model with a single iteration over the given data.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input data.\n\n y : array-like of shape (n_samples,)\n The target values.\n\n classes : array of shape (n_classes,), default=None\n Classes across all calls to partial_fit.\n Can be obtained via `np.unique(y_all)`, where y_all is the\n target vector of the entire dataset.\n This argument is required for the first call to partial_fit\n and can be omitted in the subsequent calls.\n Note that y doesn't need to contain all labels in `classes`.\n\n Returns\n -------\n self : object\n Trained MLP model.\n " }, @@ -201814,7 +200986,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPClassifier.predict.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -201827,7 +200999,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPClassifier.predict.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The input data." @@ -201848,7 +201020,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict using the multi-layer perceptron classifier.", "docstring": "Predict using the multi-layer perceptron classifier.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input data.\n\n Returns\n -------\n y : ndarray, shape (n_samples,) or (n_samples, n_classes)\n The predicted classes.\n " }, @@ -201864,7 +201036,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPClassifier.predict_log_proba.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -201877,7 +201049,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPClassifier.predict_log_proba.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray of shape (n_samples, n_features)", "description": "The input data." @@ -201889,7 +201061,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Return the log of probability estimates.", "docstring": "Return the log of probability estimates.\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n The input data.\n\n Returns\n -------\n log_y_prob : ndarray of shape (n_samples, n_classes)\n The predicted log-probability of the sample for each class\n in the model, where classes are ordered as they are in\n `self.classes_`. 
Equivalent to `log(predict_proba(X))`.\n " }, @@ -201905,7 +201077,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPClassifier.predict_proba.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -201918,7 +201090,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPClassifier.predict_proba.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The input data." @@ -201939,7 +201111,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Probability estimates.", "docstring": "Probability estimates.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input data.\n\n Returns\n -------\n y_prob : ndarray of shape (n_samples, n_classes)\n The predicted probability of the sample for each class in the\n model, where classes are ordered as they are in `self.classes_`.\n " }, @@ -201955,7 +201127,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPRegressor.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -201968,7 +201140,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPRegressor.__init__.hidden_layer_sizes", "default_value": "(100, )", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "tuple, length = n_layers - 2, default=(100,)", "description": "The ith element represents the number of neurons in the ith\nhidden layer." @@ -201993,7 +201165,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPRegressor.__init__.activation", "default_value": "'relu'", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{'identity', 'logistic', 'tanh', 'relu'}, default='relu'", "description": "Activation function for the hidden layer.\n\n- 'identity', no-op activation, useful to implement linear bottleneck,\n returns f(x) = x\n\n- 'logistic', the logistic sigmoid function,\n returns f(x) = 1 / (1 + exp(-x)).\n\n- 'tanh', the hyperbolic tan function,\n returns f(x) = tanh(x).\n\n- 'relu', the rectified linear unit function,\n returns f(x) = max(0, x)" @@ -202009,7 +201181,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPRegressor.__init__.solver", "default_value": "'adam'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'lbfgs', 'sgd', 'adam'}, default='adam'", "description": "The solver for weight optimization.\n\n- 'lbfgs' is an optimizer in the family of quasi-Newton methods.\n\n- 'sgd' refers to stochastic gradient descent.\n\n- 'adam' refers to a stochastic gradient-based optimizer proposed by\n Kingma, Diederik, and Jimmy Ba\n\nNote: The default solver 'adam' works pretty well on relatively\nlarge datasets (with thousands of training samples or more) in terms of\nboth training time and validation score.\nFor small datasets, however, 'lbfgs' can converge faster and perform\nbetter." 
@@ -202025,7 +201197,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPRegressor.__init__.alpha", "default_value": "0.0001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.0001", "description": "Strength of the L2 regularization term. The L2 regularization term\nis divided by the sample size when added to the loss." @@ -202041,7 +201213,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPRegressor.__init__.batch_size", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default='auto'", "description": "Size of minibatches for stochastic optimizers.\nIf the solver is 'lbfgs', the classifier will not use minibatch.\nWhen set to \"auto\", `batch_size=min(200, n_samples)`." @@ -202057,14 +201229,14 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPRegressor.__init__.learning_rate", "default_value": "'constant'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'constant', 'invscaling', 'adaptive'}, default='constant'", "description": "Learning rate schedule for weight updates.\n\n- 'constant' is a constant learning rate given by\n 'learning_rate_init'.\n\n- 'invscaling' gradually decreases the learning rate ``learning_rate_``\n at each time step 't' using an inverse scaling exponent of 'power_t'.\n effective_learning_rate = learning_rate_init / pow(t, power_t)\n\n- 'adaptive' keeps the learning rate constant to\n 'learning_rate_init' as long as training loss keeps decreasing.\n Each time two consecutive epochs fail to decrease training loss by at\n least tol, or fail to increase validation score by at least tol if\n 'early_stopping' is on, the current learning rate is divided by 5.\n\nOnly used when solver='sgd'." }, "type": { "kind": "EnumType", - "values": ["adaptive", "invscaling", "constant"] + "values": ["invscaling", "adaptive", "constant"] } }, { @@ -202073,7 +201245,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPRegressor.__init__.learning_rate_init", "default_value": "0.001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.001", "description": "The initial learning rate used. It controls the step-size\nin updating the weights. Only used when solver='sgd' or 'adam'." @@ -202089,7 +201261,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPRegressor.__init__.power_t", "default_value": "0.5", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.5", "description": "The exponent for inverse scaling learning rate.\nIt is used in updating effective learning rate when the learning_rate\nis set to 'invscaling'. Only used when solver='sgd'." @@ -202105,7 +201277,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPRegressor.__init__.max_iter", "default_value": "200", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=200", "description": "Maximum number of iterations. The solver iterates until convergence\n(determined by 'tol') or this number of iterations. For stochastic\nsolvers ('sgd', 'adam'), note that this determines the number of epochs\n(how many times each data point will be used), not the number of\ngradient steps." 
@@ -202121,7 +201293,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPRegressor.__init__.shuffle", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether to shuffle samples in each iteration. Only used when\nsolver='sgd' or 'adam'." @@ -202137,7 +201309,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPRegressor.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance, default=None", "description": "Determines random number generation for weights and bias\ninitialization, train-test split if early stopping is used, and batch\nsampling when solver='sgd' or 'adam'.\nPass an int for reproducible results across multiple function calls.\nSee :term:`Glossary `." @@ -202162,7 +201334,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPRegressor.__init__.tol", "default_value": "0.0001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-4", "description": "Tolerance for the optimization. When the loss or score is not improving\nby at least ``tol`` for ``n_iter_no_change`` consecutive iterations,\nunless ``learning_rate`` is set to 'adaptive', convergence is\nconsidered to be reached and training stops." @@ -202178,7 +201350,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPRegressor.__init__.verbose", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Whether to print progress messages to stdout." @@ -202194,7 +201366,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPRegressor.__init__.warm_start", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "When set to True, reuse the solution of the previous\ncall to fit as initialization, otherwise, just erase the\nprevious solution. See :term:`the Glossary `." @@ -202210,7 +201382,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPRegressor.__init__.momentum", "default_value": "0.9", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.9", "description": "Momentum for gradient descent update. Should be between 0 and 1. Only\nused when solver='sgd'." @@ -202226,7 +201398,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPRegressor.__init__.nesterovs_momentum", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether to use Nesterov's momentum. Only used when solver='sgd' and\nmomentum > 0." @@ -202242,7 +201414,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPRegressor.__init__.early_stopping", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Whether to use early stopping to terminate training when validation\nscore is not improving. 
If set to true, it will automatically set\naside 10% of training data as validation and terminate training when\nvalidation score is not improving by at least ``tol`` for\n``n_iter_no_change`` consecutive epochs.\nOnly effective when solver='sgd' or 'adam'." @@ -202258,7 +201430,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPRegressor.__init__.validation_fraction", "default_value": "0.1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.1", "description": "The proportion of training data to set aside as validation set for\nearly stopping. Must be between 0 and 1.\nOnly used if early_stopping is True." @@ -202274,7 +201446,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPRegressor.__init__.beta_1", "default_value": "0.9", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.9", "description": "Exponential decay rate for estimates of first moment vector in adam,\nshould be in [0, 1). Only used when solver='adam'." @@ -202290,7 +201462,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPRegressor.__init__.beta_2", "default_value": "0.999", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.999", "description": "Exponential decay rate for estimates of second moment vector in adam,\nshould be in [0, 1). Only used when solver='adam'." @@ -202306,7 +201478,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPRegressor.__init__.epsilon", "default_value": "1e-08", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-8", "description": "Value for numerical stability in adam. Only used when solver='adam'." @@ -202322,7 +201494,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPRegressor.__init__.n_iter_no_change", "default_value": "10", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=10", "description": "Maximum number of epochs to not meet ``tol`` improvement.\nOnly effective when solver='sgd' or 'adam'.\n\n.. versionadded:: 0.20" @@ -202338,7 +201510,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPRegressor.__init__.max_fun", "default_value": "15000", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=15000", "description": "Only used when solver='lbfgs'. Maximum number of function calls.\nThe solver iterates until convergence (determined by 'tol'), number\nof iterations reaches max_iter, or this number of function calls.\nNote that number of function calls will be greater than or equal to\nthe number of iterations for the MLPRegressor.\n\n.. versionadded:: 0.22" @@ -202350,7 +201522,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -202443,7 +201615,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPRegressor.predict.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -202456,7 +201628,7 @@ "qname": "sklearn.neural_network._multilayer_perceptron.MLPRegressor.predict.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The input data." 
@@ -202477,7 +201649,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict using the multi-layer perceptron model.", "docstring": "Predict using the multi-layer perceptron model.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input data.\n\n Returns\n -------\n y : ndarray of shape (n_samples, n_outputs)\n The predicted values.\n " }, @@ -202531,7 +201703,7 @@ "qname": "sklearn.neural_network._rbm.BernoulliRBM.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -202544,7 +201716,7 @@ "qname": "sklearn.neural_network._rbm.BernoulliRBM.__init__.n_components", "default_value": "256", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=256", "description": "Number of binary hidden units." @@ -202560,7 +201732,7 @@ "qname": "sklearn.neural_network._rbm.BernoulliRBM.__init__.learning_rate", "default_value": "0.1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.1", "description": "The learning rate for weight updates. It is *highly* recommended\nto tune this hyper-parameter. Reasonable values are in the\n10**[0., -3.] range." @@ -202576,7 +201748,7 @@ "qname": "sklearn.neural_network._rbm.BernoulliRBM.__init__.batch_size", "default_value": "10", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=10", "description": "Number of examples per minibatch." @@ -202592,7 +201764,7 @@ "qname": "sklearn.neural_network._rbm.BernoulliRBM.__init__.n_iter", "default_value": "10", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=10", "description": "Number of iterations/sweeps over the training dataset to perform\nduring training." @@ -202608,7 +201780,7 @@ "qname": "sklearn.neural_network._rbm.BernoulliRBM.__init__.verbose", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=0", "description": "The verbosity level. The default, zero, means silent mode. Range\nof values is [0, inf]." @@ -202624,7 +201796,7 @@ "qname": "sklearn.neural_network._rbm.BernoulliRBM.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Determines random number generation for:\n\n- Gibbs sampling from visible and hidden layers.\n\n- Initializing components, sampling from layers during fit.\n\n- Corrupting the data when scoring samples.\n\nPass an int for reproducible results across multiple function calls.\nSee :term:`Glossary `." 
@@ -202649,7 +201821,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -202943,7 +202115,7 @@ "qname": "sklearn.neural_network._rbm.BernoulliRBM.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -202956,7 +202128,7 @@ "qname": "sklearn.neural_network._rbm.BernoulliRBM.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Training data." @@ -202981,7 +202153,7 @@ "qname": "sklearn.neural_network._rbm.BernoulliRBM.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,) or (n_samples, n_outputs), default=None", "description": "Target values (None for unsupervised transformations)." @@ -202993,7 +202165,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the model to the data X.", "docstring": "Fit the model to the data X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training data.\n\n y : array-like of shape (n_samples,) or (n_samples, n_outputs), default=None\n Target values (None for unsupervised transformations).\n\n Returns\n -------\n self : BernoulliRBM\n The fitted model.\n " }, @@ -203009,7 +202181,7 @@ "qname": "sklearn.neural_network._rbm.BernoulliRBM.gibbs.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -203022,7 +202194,7 @@ "qname": "sklearn.neural_network._rbm.BernoulliRBM.gibbs.v", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray of shape (n_samples, n_features)", "description": "Values of the visible layer to start from." @@ -203034,7 +202206,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Perform one Gibbs sampling step.", "docstring": "Perform one Gibbs sampling step.\n\n Parameters\n ----------\n v : ndarray of shape (n_samples, n_features)\n Values of the visible layer to start from.\n\n Returns\n -------\n v_new : ndarray of shape (n_samples, n_features)\n Values of the visible layer after one Gibbs step.\n " }, @@ -203050,7 +202222,7 @@ "qname": "sklearn.neural_network._rbm.BernoulliRBM.partial_fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -203063,7 +202235,7 @@ "qname": "sklearn.neural_network._rbm.BernoulliRBM.partial_fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray of shape (n_samples, n_features)", "description": "Training data." @@ -203079,7 +202251,7 @@ "qname": "sklearn.neural_network._rbm.BernoulliRBM.partial_fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,) or (n_samples, n_outputs), default=None", "description": "Target values (None for unsupervised transformations)." 
@@ -203091,7 +202263,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the model to the partial segment of the data X.", "docstring": "Fit the model to the partial segment of the data X.\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Training data.\n\n y : array-like of shape (n_samples,) or (n_samples, n_outputs), default=None\n Target values (None for unsupervised transformations).\n\n Returns\n -------\n self : BernoulliRBM\n The fitted model.\n " }, @@ -203107,7 +202279,7 @@ "qname": "sklearn.neural_network._rbm.BernoulliRBM.score_samples.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -203120,7 +202292,7 @@ "qname": "sklearn.neural_network._rbm.BernoulliRBM.score_samples.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Values of the visible layer. Must be all-boolean (not checked)." @@ -203141,7 +202313,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Compute the pseudo-likelihood of X.", "docstring": "Compute the pseudo-likelihood of X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Values of the visible layer. Must be all-boolean (not checked).\n\n Returns\n -------\n pseudo_likelihood : ndarray of shape (n_samples,)\n Value of the pseudo-likelihood (proxy for likelihood).\n\n Notes\n -----\n This method is not deterministic: it computes a quantity called the\n free energy on X, then on a randomly corrupted version of X, and\n returns the log of the logistic function of the difference.\n " }, @@ -203157,7 +202329,7 @@ "qname": "sklearn.neural_network._rbm.BernoulliRBM.transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -203170,7 +202342,7 @@ "qname": "sklearn.neural_network._rbm.BernoulliRBM.transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The data to be transformed." 
@@ -203191,7 +202363,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Compute the hidden layer activation probabilities, P(h=1|v=X).", "docstring": "Compute the hidden layer activation probabilities, P(h=1|v=X).\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The data to be transformed.\n\n Returns\n -------\n h : ndarray of shape (n_samples, n_components)\n Latent representations of the data.\n " }, @@ -203625,7 +202797,7 @@ }, "type": { "kind": "EnumType", - "values": ["adaptive", "invscaling", "constant"] + "values": ["invscaling", "adaptive", "constant"] } }, { @@ -206277,7 +205449,7 @@ "qname": "sklearn.preprocessing._data.Binarizer.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -206290,7 +205462,7 @@ "qname": "sklearn.preprocessing._data.Binarizer.__init__.threshold", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.0", "description": "Feature values below or equal to this are replaced by 0, above it by 1.\nThreshold may not be less than 0 for operations on sparse matrices." @@ -206306,7 +205478,7 @@ "qname": "sklearn.preprocessing._data.Binarizer.__init__.copy", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Set to False to perform inplace binarization and avoid a copy (if\nthe input is already a numpy array or a scipy.sparse CSR matrix)." @@ -206318,7 +205490,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -206359,7 +205531,7 @@ "qname": "sklearn.preprocessing._data.Binarizer.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -206372,7 +205544,7 @@ "qname": "sklearn.preprocessing._data.Binarizer.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The data." @@ -206397,7 +205569,7 @@ "qname": "sklearn.preprocessing._data.Binarizer.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "None", "description": "Ignored." 
@@ -206409,7 +205581,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Do nothing and return the estimator unchanged.\n\nThis method is just there to implement the usual API and hence\nwork in pipelines.", "docstring": "Do nothing and return the estimator unchanged.\n\n This method is just there to implement the usual API and hence\n work in pipelines.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The data.\n\n y : None\n Ignored.\n\n Returns\n -------\n self : object\n Fitted transformer.\n " }, @@ -206425,7 +205597,7 @@ "qname": "sklearn.preprocessing._data.Binarizer.transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -206438,7 +205610,7 @@ "qname": "sklearn.preprocessing._data.Binarizer.transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The data to binarize, element by element.\nscipy.sparse matrices should be in CSR format to avoid an\nun-necessary copy." @@ -206463,7 +205635,7 @@ "qname": "sklearn.preprocessing._data.Binarizer.transform.copy", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "bool", "description": "Copy the input X or not." @@ -206475,7 +205647,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Binarize each element of X.", "docstring": "Binarize each element of X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The data to binarize, element by element.\n scipy.sparse matrices should be in CSR format to avoid an\n un-necessary copy.\n\n copy : bool\n Copy the input X or not.\n\n Returns\n -------\n X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)\n Transformed array.\n " }, @@ -206491,7 +205663,7 @@ "qname": "sklearn.preprocessing._data.KernelCenterer.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -206500,7 +205672,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -206566,7 +205738,7 @@ "qname": "sklearn.preprocessing._data.KernelCenterer.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -206579,7 +205751,7 @@ "qname": "sklearn.preprocessing._data.KernelCenterer.fit.K", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray of shape (n_samples, n_samples)", "description": "Kernel matrix." @@ -206595,7 +205767,7 @@ "qname": "sklearn.preprocessing._data.KernelCenterer.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "None", "description": "Ignored." 
@@ -206607,7 +205779,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit KernelCenterer.", "docstring": "Fit KernelCenterer.\n\n Parameters\n ----------\n K : ndarray of shape (n_samples, n_samples)\n Kernel matrix.\n\n y : None\n Ignored.\n\n Returns\n -------\n self : object\n Returns the instance itself.\n " }, @@ -206623,7 +205795,7 @@ "qname": "sklearn.preprocessing._data.KernelCenterer.transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -206636,7 +205808,7 @@ "qname": "sklearn.preprocessing._data.KernelCenterer.transform.K", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray of shape (n_samples1, n_samples2)", "description": "Kernel matrix." @@ -206652,7 +205824,7 @@ "qname": "sklearn.preprocessing._data.KernelCenterer.transform.copy", "default_value": "True", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Set to False to perform inplace computation." @@ -206664,7 +205836,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Center kernel matrix.", "docstring": "Center kernel matrix.\n\n Parameters\n ----------\n K : ndarray of shape (n_samples1, n_samples2)\n Kernel matrix.\n\n copy : bool, default=True\n Set to False to perform inplace computation.\n\n Returns\n -------\n K_new : ndarray of shape (n_samples1, n_samples2)\n Returns the instance itself.\n " }, @@ -206680,7 +205852,7 @@ "qname": "sklearn.preprocessing._data.MaxAbsScaler.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -206693,7 +205865,7 @@ "qname": "sklearn.preprocessing._data.MaxAbsScaler.__init__.copy", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Set to False to perform inplace scaling and avoid a copy (if the input\nis already a numpy array)." @@ -206705,7 +205877,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -206771,7 +205943,7 @@ "qname": "sklearn.preprocessing._data.MaxAbsScaler.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -206784,7 +205956,7 @@ "qname": "sklearn.preprocessing._data.MaxAbsScaler.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The data used to compute the per-feature minimum and maximum\nused for later scaling along the features axis." @@ -206809,7 +205981,7 @@ "qname": "sklearn.preprocessing._data.MaxAbsScaler.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "None", "description": "Ignored." 
@@ -206821,7 +205993,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Compute the maximum absolute value to be used for later scaling.", "docstring": "Compute the maximum absolute value to be used for later scaling.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The data used to compute the per-feature minimum and maximum\n used for later scaling along the features axis.\n\n y : None\n Ignored.\n\n Returns\n -------\n self : object\n Fitted scaler.\n " }, @@ -206837,7 +206009,7 @@ "qname": "sklearn.preprocessing._data.MaxAbsScaler.inverse_transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -206850,7 +206022,7 @@ "qname": "sklearn.preprocessing._data.MaxAbsScaler.inverse_transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The data that should be transformed back." @@ -206871,7 +206043,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Scale back the data to the original representation.", "docstring": "Scale back the data to the original representation.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The data that should be transformed back.\n\n Returns\n -------\n X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)\n Transformed array.\n " }, @@ -206887,7 +206059,7 @@ "qname": "sklearn.preprocessing._data.MaxAbsScaler.partial_fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -206900,7 +206072,7 @@ "qname": "sklearn.preprocessing._data.MaxAbsScaler.partial_fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The data used to compute the mean and standard deviation\nused for later scaling along the features axis." @@ -206925,7 +206097,7 @@ "qname": "sklearn.preprocessing._data.MaxAbsScaler.partial_fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "None", "description": "Ignored." @@ -206937,7 +206109,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Online computation of max absolute value of X for later scaling.\n\nAll of X is processed as a single batch. This is intended for cases\nwhen :meth:`fit` is not feasible due to very large number of\n`n_samples` or because X is read from a continuous stream.", "docstring": "Online computation of max absolute value of X for later scaling.\n\n All of X is processed as a single batch. 
This is intended for cases\n when :meth:`fit` is not feasible due to very large number of\n `n_samples` or because X is read from a continuous stream.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The data used to compute the mean and standard deviation\n used for later scaling along the features axis.\n\n y : None\n Ignored.\n\n Returns\n -------\n self : object\n Fitted scaler.\n " }, @@ -206953,7 +206125,7 @@ "qname": "sklearn.preprocessing._data.MaxAbsScaler.transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -206966,7 +206138,7 @@ "qname": "sklearn.preprocessing._data.MaxAbsScaler.transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The data that should be scaled." @@ -206987,7 +206159,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Scale the data.", "docstring": "Scale the data.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The data that should be scaled.\n\n Returns\n -------\n X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)\n Transformed array.\n " }, @@ -207003,7 +206175,7 @@ "qname": "sklearn.preprocessing._data.MinMaxScaler.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -207016,7 +206188,7 @@ "qname": "sklearn.preprocessing._data.MinMaxScaler.__init__.feature_range", "default_value": "(0, 1)", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "tuple (min, max), default=(0, 1)", "description": "Desired range of transformed data." @@ -207032,7 +206204,7 @@ "qname": "sklearn.preprocessing._data.MinMaxScaler.__init__.copy", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Set to False to perform inplace row normalization and avoid a\ncopy (if the input is already a numpy array)." @@ -207048,7 +206220,7 @@ "qname": "sklearn.preprocessing._data.MinMaxScaler.__init__.clip", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Set to True to clip transformed values of held-out data to\nprovided `feature range`.\n\n.. versionadded:: 0.24" @@ -207060,7 +206232,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -207126,7 +206298,7 @@ "qname": "sklearn.preprocessing._data.MinMaxScaler.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -207139,7 +206311,7 @@ "qname": "sklearn.preprocessing._data.MinMaxScaler.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "The data used to compute the per-feature minimum and maximum\nused for later scaling along the features axis." 
@@ -207155,7 +206327,7 @@ "qname": "sklearn.preprocessing._data.MinMaxScaler.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "None", "description": "Ignored." @@ -207167,7 +206339,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Compute the minimum and maximum to be used for later scaling.", "docstring": "Compute the minimum and maximum to be used for later scaling.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The data used to compute the per-feature minimum and maximum\n used for later scaling along the features axis.\n\n y : None\n Ignored.\n\n Returns\n -------\n self : object\n Fitted scaler.\n " }, @@ -207183,7 +206355,7 @@ "qname": "sklearn.preprocessing._data.MinMaxScaler.inverse_transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -207196,7 +206368,7 @@ "qname": "sklearn.preprocessing._data.MinMaxScaler.inverse_transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Input data that will be transformed. It cannot be sparse." @@ -207208,7 +206380,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Undo the scaling of X according to feature_range.", "docstring": "Undo the scaling of X according to feature_range.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Input data that will be transformed. It cannot be sparse.\n\n Returns\n -------\n Xt : ndarray of shape (n_samples, n_features)\n Transformed data.\n " }, @@ -207224,7 +206396,7 @@ "qname": "sklearn.preprocessing._data.MinMaxScaler.partial_fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -207237,7 +206409,7 @@ "qname": "sklearn.preprocessing._data.MinMaxScaler.partial_fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "The data used to compute the mean and standard deviation\nused for later scaling along the features axis." @@ -207253,7 +206425,7 @@ "qname": "sklearn.preprocessing._data.MinMaxScaler.partial_fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "None", "description": "Ignored." @@ -207265,7 +206437,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Online computation of min and max on X for later scaling.\n\nAll of X is processed as a single batch. This is intended for cases\nwhen :meth:`fit` is not feasible due to very large number of\n`n_samples` or because X is read from a continuous stream.", "docstring": "Online computation of min and max on X for later scaling.\n\n All of X is processed as a single batch. 
This is intended for cases\n when :meth:`fit` is not feasible due to very large number of\n `n_samples` or because X is read from a continuous stream.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The data used to compute the mean and standard deviation\n used for later scaling along the features axis.\n\n y : None\n Ignored.\n\n Returns\n -------\n self : object\n Fitted scaler.\n " }, @@ -207281,7 +206453,7 @@ "qname": "sklearn.preprocessing._data.MinMaxScaler.transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -207294,7 +206466,7 @@ "qname": "sklearn.preprocessing._data.MinMaxScaler.transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Input data that will be transformed." @@ -207306,7 +206478,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Scale features of X according to feature_range.", "docstring": "Scale features of X according to feature_range.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Input data that will be transformed.\n\n Returns\n -------\n Xt : ndarray of shape (n_samples, n_features)\n Transformed data.\n " }, @@ -207322,7 +206494,7 @@ "qname": "sklearn.preprocessing._data.Normalizer.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -207335,14 +206507,14 @@ "qname": "sklearn.preprocessing._data.Normalizer.__init__.norm", "default_value": "'l2'", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{'l1', 'l2', 'max'}, default='l2'", "description": "The norm to use to normalize each non zero sample. If norm='max'\nis used, values will be rescaled by the maximum of the absolute\nvalues." }, "type": { "kind": "EnumType", - "values": ["l2", "max", "l1"] + "values": ["l2", "l1", "max"] } }, { @@ -207351,7 +206523,7 @@ "qname": "sklearn.preprocessing._data.Normalizer.__init__.copy", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Set to False to perform inplace row normalization and avoid a\ncopy (if the input is already a numpy array or a scipy.sparse\nCSR matrix)." @@ -207363,7 +206535,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -207404,7 +206576,7 @@ "qname": "sklearn.preprocessing._data.Normalizer.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -207417,7 +206589,7 @@ "qname": "sklearn.preprocessing._data.Normalizer.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The data to estimate the normalization parameters." @@ -207442,7 +206614,7 @@ "qname": "sklearn.preprocessing._data.Normalizer.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present here for API consistency by convention." 
@@ -207454,7 +206626,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Do nothing and return the estimator unchanged.\n\nThis method is just there to implement the usual API and hence\nwork in pipelines.", "docstring": "Do nothing and return the estimator unchanged.\n\n This method is just there to implement the usual API and hence\n work in pipelines.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The data to estimate the normalization parameters.\n\n y : Ignored\n Not used, present here for API consistency by convention.\n\n Returns\n -------\n self : object\n Fitted transformer.\n " }, @@ -207470,7 +206642,7 @@ "qname": "sklearn.preprocessing._data.Normalizer.transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -207483,7 +206655,7 @@ "qname": "sklearn.preprocessing._data.Normalizer.transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The data to normalize, row by row. scipy.sparse matrices should be\nin CSR format to avoid an un-necessary copy." @@ -207508,7 +206680,7 @@ "qname": "sklearn.preprocessing._data.Normalizer.transform.copy", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=None", "description": "Copy the input X or not." @@ -207520,7 +206692,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Scale each non zero row of X to unit norm.", "docstring": "Scale each non zero row of X to unit norm.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The data to normalize, row by row. scipy.sparse matrices should be\n in CSR format to avoid an un-necessary copy.\n\n copy : bool, default=None\n Copy the input X or not.\n\n Returns\n -------\n X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)\n Transformed array.\n " }, @@ -207536,7 +206708,7 @@ "qname": "sklearn.preprocessing._data.PowerTransformer.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -207549,14 +206721,14 @@ "qname": "sklearn.preprocessing._data.PowerTransformer.__init__.method", "default_value": "'yeo-johnson'", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{'yeo-johnson', 'box-cox'}, default='yeo-johnson'", "description": "The power transform method. Available methods are:\n\n- 'yeo-johnson' [1]_, works with positive and negative values\n- 'box-cox' [2]_, only works with strictly positive values" }, "type": { "kind": "EnumType", - "values": ["box-cox", "yeo-johnson"] + "values": ["yeo-johnson", "box-cox"] } }, { @@ -207565,7 +206737,7 @@ "qname": "sklearn.preprocessing._data.PowerTransformer.__init__.standardize", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Set to True to apply zero-mean, unit-variance normalization to the\ntransformed output." 
@@ -207581,7 +206753,7 @@ "qname": "sklearn.preprocessing._data.PowerTransformer.__init__.copy", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Set to False to perform inplace computation during transformation." @@ -207593,7 +206765,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -208032,7 +207204,7 @@ "qname": "sklearn.preprocessing._data.PowerTransformer.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -208045,7 +207217,7 @@ "qname": "sklearn.preprocessing._data.PowerTransformer.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "The data used to estimate the optimal transformation parameters." @@ -208061,7 +207233,7 @@ "qname": "sklearn.preprocessing._data.PowerTransformer.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "None", "description": "Ignored." @@ -208073,7 +207245,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Estimate the optimal parameter lambda for each feature.\n\nThe optimal lambda parameter for minimizing skewness is estimated on\neach feature independently using maximum likelihood.", "docstring": "Estimate the optimal parameter lambda for each feature.\n\n The optimal lambda parameter for minimizing skewness is estimated on\n each feature independently using maximum likelihood.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The data used to estimate the optimal transformation parameters.\n\n y : None\n Ignored.\n\n Returns\n -------\n self : object\n Fitted transformer.\n " }, @@ -208089,7 +207261,7 @@ "qname": "sklearn.preprocessing._data.PowerTransformer.fit_transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -208102,7 +207274,7 @@ "qname": "sklearn.preprocessing._data.PowerTransformer.fit_transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "The data used to estimate the optimal transformation parameters\nand to be transformed using a power transformation." @@ -208118,7 +207290,7 @@ "qname": "sklearn.preprocessing._data.PowerTransformer.fit_transform.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present for API consistency by convention." 
@@ -208130,7 +207302,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit `PowerTransformer` to `X`, then transform `X`.", "docstring": "Fit `PowerTransformer` to `X`, then transform `X`.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The data used to estimate the optimal transformation parameters\n and to be transformed using a power transformation.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n X_new : ndarray of shape (n_samples, n_features)\n Transformed data.\n " }, @@ -208146,7 +207318,7 @@ "qname": "sklearn.preprocessing._data.PowerTransformer.inverse_transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -208159,7 +207331,7 @@ "qname": "sklearn.preprocessing._data.PowerTransformer.inverse_transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "The transformed data." @@ -208171,7 +207343,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Apply the inverse power transformation using the fitted lambdas.\n\nThe inverse of the Box-Cox transformation is given by::\n\n if lambda_ == 0:\n X = exp(X_trans)\n else:\n X = (X_trans * lambda_ + 1) ** (1 / lambda_)\n\nThe inverse of the Yeo-Johnson transformation is given by::\n\n if X >= 0 and lambda_ == 0:\n X = exp(X_trans) - 1\n elif X >= 0 and lambda_ != 0:\n X = (X_trans * lambda_ + 1) ** (1 / lambda_) - 1\n elif X < 0 and lambda_ != 2:\n X = 1 - (-(2 - lambda_) * X_trans + 1) ** (1 / (2 - lambda_))\n elif X < 0 and lambda_ == 2:\n X = 1 - exp(-X_trans)", "docstring": "Apply the inverse power transformation using the fitted lambdas.\n\n The inverse of the Box-Cox transformation is given by::\n\n if lambda_ == 0:\n X = exp(X_trans)\n else:\n X = (X_trans * lambda_ + 1) ** (1 / lambda_)\n\n The inverse of the Yeo-Johnson transformation is given by::\n\n if X >= 0 and lambda_ == 0:\n X = exp(X_trans) - 1\n elif X >= 0 and lambda_ != 0:\n X = (X_trans * lambda_ + 1) ** (1 / lambda_) - 1\n elif X < 0 and lambda_ != 2:\n X = 1 - (-(2 - lambda_) * X_trans + 1) ** (1 / (2 - lambda_))\n elif X < 0 and lambda_ == 2:\n X = 1 - exp(-X_trans)\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The transformed data.\n\n Returns\n -------\n X : ndarray of shape (n_samples, n_features)\n The original data.\n " }, @@ -208187,7 +207359,7 @@ "qname": "sklearn.preprocessing._data.PowerTransformer.transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -208200,7 +207372,7 @@ "qname": "sklearn.preprocessing._data.PowerTransformer.transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "The data to be transformed using a power transformation." 
@@ -208212,7 +207384,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Apply the power transform to each feature using the fitted lambdas.", "docstring": "Apply the power transform to each feature using the fitted lambdas.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The data to be transformed using a power transformation.\n\n Returns\n -------\n X_trans : ndarray of shape (n_samples, n_features)\n The transformed data.\n " }, @@ -208228,7 +207400,7 @@ "qname": "sklearn.preprocessing._data.QuantileTransformer.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -208241,7 +207413,7 @@ "qname": "sklearn.preprocessing._data.QuantileTransformer.__init__.n_quantiles", "default_value": "1000", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=1000 or n_samples", "description": "Number of quantiles to be computed. It corresponds to the number\nof landmarks used to discretize the cumulative distribution function.\nIf n_quantiles is larger than the number of samples, n_quantiles is set\nto the number of samples as a larger number of quantiles does not give\na better approximation of the cumulative distribution function\nestimator." @@ -208257,7 +207429,7 @@ "qname": "sklearn.preprocessing._data.QuantileTransformer.__init__.output_distribution", "default_value": "'uniform'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'uniform', 'normal'}, default='uniform'", "description": "Marginal distribution for the transformed data. The choices are\n'uniform' (default) or 'normal'." @@ -208273,7 +207445,7 @@ "qname": "sklearn.preprocessing._data.QuantileTransformer.__init__.ignore_implicit_zeros", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Only applies to sparse matrices. If True, the sparse entries of the\nmatrix are discarded to compute the quantile statistics. If False,\nthese entries are treated as zeros." @@ -208289,7 +207461,7 @@ "qname": "sklearn.preprocessing._data.QuantileTransformer.__init__.subsample", "default_value": "int(100000.0)", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=1e5", "description": "Maximum number of samples used to estimate the quantiles for\ncomputational efficiency. Note that the subsampling procedure may\ndiffer for value-identical sparse and dense matrices." @@ -208305,7 +207477,7 @@ "qname": "sklearn.preprocessing._data.QuantileTransformer.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Determines random number generation for subsampling and smoothing\nnoise.\nPlease see ``subsample`` for more details.\nPass an int for reproducible results across multiple function calls.\nSee :term:`Glossary `." @@ -208334,7 +207506,7 @@ "qname": "sklearn.preprocessing._data.QuantileTransformer.__init__.copy", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Set to False to perform inplace transformation and avoid a copy (if the\ninput is already a numpy array)." 
@@ -208346,7 +207518,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -208693,7 +207865,7 @@ "qname": "sklearn.preprocessing._data.QuantileTransformer.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -208706,7 +207878,7 @@ "qname": "sklearn.preprocessing._data.QuantileTransformer.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The data used to scale along the features axis. If a sparse\nmatrix is provided, it will be converted into a sparse\n``csc_matrix``. Additionally, the sparse matrix needs to be\nnonnegative if `ignore_implicit_zeros` is False." @@ -208731,7 +207903,7 @@ "qname": "sklearn.preprocessing._data.QuantileTransformer.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "None", "description": "Ignored." @@ -208743,7 +207915,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Compute the quantiles used for transforming.", "docstring": "Compute the quantiles used for transforming.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The data used to scale along the features axis. If a sparse\n matrix is provided, it will be converted into a sparse\n ``csc_matrix``. Additionally, the sparse matrix needs to be\n nonnegative if `ignore_implicit_zeros` is False.\n\n y : None\n Ignored.\n\n Returns\n -------\n self : object\n Fitted transformer.\n " }, @@ -208759,7 +207931,7 @@ "qname": "sklearn.preprocessing._data.QuantileTransformer.inverse_transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -208772,7 +207944,7 @@ "qname": "sklearn.preprocessing._data.QuantileTransformer.inverse_transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The data used to scale along the features axis. If a sparse\nmatrix is provided, it will be converted into a sparse\n``csc_matrix``. Additionally, the sparse matrix needs to be\nnonnegative if `ignore_implicit_zeros` is False." @@ -208793,7 +207965,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Back-projection to the original space.", "docstring": "Back-projection to the original space.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The data used to scale along the features axis. If a sparse\n matrix is provided, it will be converted into a sparse\n ``csc_matrix``. 
Additionally, the sparse matrix needs to be\n nonnegative if `ignore_implicit_zeros` is False.\n\n Returns\n -------\n Xt : {ndarray, sparse matrix} of (n_samples, n_features)\n The projected data.\n " }, @@ -208809,7 +207981,7 @@ "qname": "sklearn.preprocessing._data.QuantileTransformer.transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -208822,7 +207994,7 @@ "qname": "sklearn.preprocessing._data.QuantileTransformer.transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The data used to scale along the features axis. If a sparse\nmatrix is provided, it will be converted into a sparse\n``csc_matrix``. Additionally, the sparse matrix needs to be\nnonnegative if `ignore_implicit_zeros` is False." @@ -208843,7 +208015,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Feature-wise transformation of the data.", "docstring": "Feature-wise transformation of the data.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The data used to scale along the features axis. If a sparse\n matrix is provided, it will be converted into a sparse\n ``csc_matrix``. Additionally, the sparse matrix needs to be\n nonnegative if `ignore_implicit_zeros` is False.\n\n Returns\n -------\n Xt : {ndarray, sparse matrix} of shape (n_samples, n_features)\n The projected data.\n " }, @@ -208859,7 +208031,7 @@ "qname": "sklearn.preprocessing._data.RobustScaler.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -208872,7 +208044,7 @@ "qname": "sklearn.preprocessing._data.RobustScaler.__init__.with_centering", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "If `True`, center the data before scaling.\nThis will cause :meth:`transform` to raise an exception when attempted\non sparse matrices, because centering them entails building a dense\nmatrix which in common use cases is likely to be too large to fit in\nmemory." @@ -208888,7 +208060,7 @@ "qname": "sklearn.preprocessing._data.RobustScaler.__init__.with_scaling", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "If `True`, scale the data to interquartile range." @@ -208904,7 +208076,7 @@ "qname": "sklearn.preprocessing._data.RobustScaler.__init__.quantile_range", "default_value": "(25.0, 75.0)", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0, default=(25.0, 75.0)", "description": "Quantile range used to calculate `scale_`. By default this is equal to\nthe IQR, i.e., `q_min` is the first quantile and `q_max` is the third\nquantile.\n\n.. versionadded:: 0.18" @@ -208929,7 +208101,7 @@ "qname": "sklearn.preprocessing._data.RobustScaler.__init__.copy", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "If `False`, try to avoid a copy and do inplace scaling instead.\nThis is not guaranteed to always work inplace; e.g. 
if the data is\nnot a NumPy array or scipy.sparse CSR matrix, a copy may still be\nreturned." @@ -208945,7 +208117,7 @@ "qname": "sklearn.preprocessing._data.RobustScaler.__init__.unit_variance", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "If `True`, scale data so that normally distributed features have a\nvariance of 1. In general, if the difference between the x-values of\n`q_max` and `q_min` for a standard normal distribution is greater\nthan 1, the dataset will be scaled down. If less than 1, the dataset\nwill be scaled up.\n\n.. versionadded:: 0.24" @@ -208957,7 +208129,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -208998,7 +208170,7 @@ "qname": "sklearn.preprocessing._data.RobustScaler.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -209011,7 +208183,7 @@ "qname": "sklearn.preprocessing._data.RobustScaler.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The data used to compute the median and quantiles\nused for later scaling along the features axis." @@ -209036,7 +208208,7 @@ "qname": "sklearn.preprocessing._data.RobustScaler.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present here for API consistency by convention." @@ -209048,7 +208220,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Compute the median and quantiles to be used for scaling.", "docstring": "Compute the median and quantiles to be used for scaling.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The data used to compute the median and quantiles\n used for later scaling along the features axis.\n\n y : Ignored\n Not used, present here for API consistency by convention.\n\n Returns\n -------\n self : object\n Fitted scaler.\n " }, @@ -209064,7 +208236,7 @@ "qname": "sklearn.preprocessing._data.RobustScaler.inverse_transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -209077,7 +208249,7 @@ "qname": "sklearn.preprocessing._data.RobustScaler.inverse_transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The rescaled data to be transformed back." 
@@ -209098,7 +208270,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Scale back the data to the original representation.", "docstring": "Scale back the data to the original representation.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The rescaled data to be transformed back.\n\n Returns\n -------\n X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)\n Transformed array.\n " }, @@ -209114,7 +208286,7 @@ "qname": "sklearn.preprocessing._data.RobustScaler.transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -209127,7 +208299,7 @@ "qname": "sklearn.preprocessing._data.RobustScaler.transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The data used to scale along the specified axis." @@ -209148,7 +208320,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Center and scale the data.", "docstring": "Center and scale the data.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The data used to scale along the specified axis.\n\n Returns\n -------\n X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)\n Transformed array.\n " }, @@ -209164,7 +208336,7 @@ "qname": "sklearn.preprocessing._data.StandardScaler.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -209177,7 +208349,7 @@ "qname": "sklearn.preprocessing._data.StandardScaler.__init__.copy", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "If False, try to avoid a copy and do inplace scaling instead.\nThis is not guaranteed to always work inplace; e.g. if the data is\nnot a NumPy array or scipy.sparse CSR matrix, a copy may still be\nreturned." @@ -209193,7 +208365,7 @@ "qname": "sklearn.preprocessing._data.StandardScaler.__init__.with_mean", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "If True, center the data before scaling.\nThis does not work (and will raise an exception) when attempted on\nsparse matrices, because centering them entails building a dense\nmatrix which in common use cases is likely to be too large to fit in\nmemory." @@ -209209,7 +208381,7 @@ "qname": "sklearn.preprocessing._data.StandardScaler.__init__.with_std", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "If True, scale the data to unit variance (or equivalently,\nunit standard deviation)." 
@@ -209221,7 +208393,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -209287,7 +208459,7 @@ "qname": "sklearn.preprocessing._data.StandardScaler.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -209300,7 +208472,7 @@ "qname": "sklearn.preprocessing._data.StandardScaler.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The data used to compute the mean and standard deviation\nused for later scaling along the features axis." @@ -209325,7 +208497,7 @@ "qname": "sklearn.preprocessing._data.StandardScaler.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "None", "description": "Ignored." @@ -209341,7 +208513,7 @@ "qname": "sklearn.preprocessing._data.StandardScaler.fit.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,), default=None", "description": "Individual weights for each sample.\n\n.. versionadded:: 0.24\n parameter *sample_weight* support to StandardScaler." @@ -209353,7 +208525,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Compute the mean and std to be used for later scaling.", "docstring": "Compute the mean and std to be used for later scaling.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The data used to compute the mean and standard deviation\n used for later scaling along the features axis.\n\n y : None\n Ignored.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Individual weights for each sample.\n\n .. versionadded:: 0.24\n parameter *sample_weight* support to StandardScaler.\n\n Returns\n -------\n self : object\n Fitted scaler.\n " }, @@ -209369,7 +208541,7 @@ "qname": "sklearn.preprocessing._data.StandardScaler.inverse_transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -209382,7 +208554,7 @@ "qname": "sklearn.preprocessing._data.StandardScaler.inverse_transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The data used to scale along the features axis." @@ -209407,7 +208579,7 @@ "qname": "sklearn.preprocessing._data.StandardScaler.inverse_transform.copy", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=None", "description": "Copy the input X or not." 
@@ -209419,7 +208591,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Scale back the data to the original representation.", "docstring": "Scale back the data to the original representation.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The data used to scale along the features axis.\n copy : bool, default=None\n Copy the input X or not.\n\n Returns\n -------\n X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)\n Transformed array.\n " }, @@ -209435,7 +208607,7 @@ "qname": "sklearn.preprocessing._data.StandardScaler.partial_fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -209448,7 +208620,7 @@ "qname": "sklearn.preprocessing._data.StandardScaler.partial_fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The data used to compute the mean and standard deviation\nused for later scaling along the features axis." @@ -209473,7 +208645,7 @@ "qname": "sklearn.preprocessing._data.StandardScaler.partial_fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "None", "description": "Ignored." @@ -209489,7 +208661,7 @@ "qname": "sklearn.preprocessing._data.StandardScaler.partial_fit.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,), default=None", "description": "Individual weights for each sample.\n\n.. versionadded:: 0.24\n parameter *sample_weight* support to StandardScaler." @@ -209501,7 +208673,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Online computation of mean and std on X for later scaling.\n\nAll of X is processed as a single batch. This is intended for cases\nwhen :meth:`fit` is not feasible due to very large number of\n`n_samples` or because X is read from a continuous stream.\n\nThe algorithm for incremental mean and std is given in Equation 1.5a,b\nin Chan, Tony F., Gene H. Golub, and Randall J. LeVeque. \"Algorithms\nfor computing the sample variance: Analysis and recommendations.\"\nThe American Statistician 37.3 (1983): 242-247:", "docstring": "Online computation of mean and std on X for later scaling.\n\n All of X is processed as a single batch. This is intended for cases\n when :meth:`fit` is not feasible due to very large number of\n `n_samples` or because X is read from a continuous stream.\n\n The algorithm for incremental mean and std is given in Equation 1.5a,b\n in Chan, Tony F., Gene H. Golub, and Randall J. LeVeque. \"Algorithms\n for computing the sample variance: Analysis and recommendations.\"\n The American Statistician 37.3 (1983): 242-247:\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The data used to compute the mean and standard deviation\n used for later scaling along the features axis.\n\n y : None\n Ignored.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Individual weights for each sample.\n\n .. 
versionadded:: 0.24\n parameter *sample_weight* support to StandardScaler.\n\n Returns\n -------\n self : object\n Fitted scaler.\n " }, @@ -209517,7 +208689,7 @@ "qname": "sklearn.preprocessing._data.StandardScaler.transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -209530,7 +208702,7 @@ "qname": "sklearn.preprocessing._data.StandardScaler.transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix of shape (n_samples, n_features)", "description": "The data used to scale along the features axis." @@ -209555,7 +208727,7 @@ "qname": "sklearn.preprocessing._data.StandardScaler.transform.copy", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=None", "description": "Copy the input X or not." @@ -209567,7 +208739,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Perform standardization by centering and scaling.", "docstring": "Perform standardization by centering and scaling.\n\n Parameters\n ----------\n X : {array-like, sparse matrix of shape (n_samples, n_features)\n The data used to scale along the features axis.\n copy : bool, default=None\n Copy the input X or not.\n\n Returns\n -------\n X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)\n Transformed array.\n " }, @@ -209984,7 +209156,7 @@ }, "type": { "kind": "EnumType", - "values": ["l2", "max", "l1"] + "values": ["l2", "l1", "max"] } }, { @@ -210076,7 +209248,7 @@ }, "type": { "kind": "EnumType", - "values": ["box-cox", "yeo-johnson"] + "values": ["yeo-johnson", "box-cox"] } }, { @@ -210534,7 +209706,7 @@ "qname": "sklearn.preprocessing._discretization.KBinsDiscretizer.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -210547,7 +209719,7 @@ "qname": "sklearn.preprocessing._discretization.KBinsDiscretizer.__init__.n_bins", "default_value": "5", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int or array-like of shape (n_features,), default=5", "description": "The number of bins to produce. Raises ValueError if ``n_bins < 2``." @@ -210572,14 +209744,14 @@ "qname": "sklearn.preprocessing._discretization.KBinsDiscretizer.__init__.encode", "default_value": "'onehot'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'onehot', 'onehot-dense', 'ordinal'}, default='onehot'", "description": "Method used to encode the transformed result.\n\n- 'onehot': Encode the transformed result with one-hot encoding\n and return a sparse matrix. Ignored features are always\n stacked to the right.\n- 'onehot-dense': Encode the transformed result with one-hot encoding\n and return a dense array. Ignored features are always\n stacked to the right.\n- 'ordinal': Return the bin identifier encoded as an integer value." 
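The StandardScaler hunks above mark fit, partial_fit, transform and inverse_transform (with their X, y, sample_weight and copy parameters) as public. Before the KBinsDiscretizer entries continue, here is a minimal usage sketch of that API, assuming scikit-learn >= 0.24 for the sample_weight support noted in the docstrings; the toy array and weights are invented for illustration only:

import numpy as np
from sklearn.preprocessing import StandardScaler

X = np.array([[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]])

# fit computes the per-feature mean and std used for later scaling
scaler = StandardScaler().fit(X, sample_weight=[1.0, 1.0, 2.0])
X_scaled = scaler.transform(X)

# inverse_transform scales the data back to the original representation
X_back = scaler.inverse_transform(X_scaled)

# partial_fit treats each call as a single batch, for streams or very large n_samples
streaming = StandardScaler()
for batch in np.array_split(X, 3):
    streaming.partial_fit(batch)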
}, "type": { "kind": "EnumType", - "values": ["onehot", "ordinal", "onehot-dense"] + "values": ["onehot", "onehot-dense", "ordinal"] } }, { @@ -210588,14 +209760,14 @@ "qname": "sklearn.preprocessing._discretization.KBinsDiscretizer.__init__.strategy", "default_value": "'quantile'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'uniform', 'quantile', 'kmeans'}, default='quantile'", "description": "Strategy used to define the widths of the bins.\n\n- 'uniform': All bins in each feature have identical widths.\n- 'quantile': All bins in each feature have the same number of points.\n- 'kmeans': Values in each bin have the same nearest center of a 1D\n k-means cluster." }, "type": { "kind": "EnumType", - "values": ["kmeans", "uniform", "quantile"] + "values": ["uniform", "quantile", "kmeans"] } }, { @@ -210604,7 +209776,7 @@ "qname": "sklearn.preprocessing._discretization.KBinsDiscretizer.__init__.dtype", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{np.float32, np.float64}, default=None", "description": "The desired data-type for the output. If None, output dtype is\nconsistent with input dtype. Only np.float32 and np.float64 are\nsupported.\n\n.. versionadded:: 0.24" @@ -210620,7 +209792,7 @@ "qname": "sklearn.preprocessing._discretization.KBinsDiscretizer.__init__.subsample", "default_value": "'warn'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or None (default='warn')", "description": "Maximum number of samples, used to fit the model, for computational\nefficiency. Used when `strategy=\"quantile\"`.\n`subsample=None` means that all the training samples are used when\ncomputing the quantiles that determine the binning thresholds.\nSince quantile computation relies on sorting each column of `X` and\nthat sorting has an `n log(n)` time complexity,\nit is recommended to use subsampling on datasets with a\nvery large number of samples.\n\n.. deprecated:: 1.1\n In version 1.3 and onwards, `subsample=2e5` will be the default." @@ -210645,7 +209817,7 @@ "qname": "sklearn.preprocessing._discretization.KBinsDiscretizer.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Determines random number generation for subsampling.\nPass an int for reproducible results across multiple function calls.\nSee the `subsample` parameter for more details.\nSee :term:`Glossary `.\n\n.. versionadded:: 1.1" @@ -210670,7 +209842,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -210724,7 +209896,7 @@ "qname": "sklearn.preprocessing._discretization.KBinsDiscretizer.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -210737,7 +209909,7 @@ "qname": "sklearn.preprocessing._discretization.KBinsDiscretizer.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Data to be discretized." 
@@ -210753,7 +209925,7 @@ "qname": "sklearn.preprocessing._discretization.KBinsDiscretizer.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "None", "description": "Ignored. This parameter exists only for compatibility with\n:class:`~sklearn.pipeline.Pipeline`." @@ -210765,7 +209937,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the estimator.", "docstring": "\n Fit the estimator.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Data to be discretized.\n\n y : None\n Ignored. This parameter exists only for compatibility with\n :class:`~sklearn.pipeline.Pipeline`.\n\n Returns\n -------\n self : object\n Returns the instance itself.\n " }, @@ -210781,7 +209953,7 @@ "qname": "sklearn.preprocessing._discretization.KBinsDiscretizer.get_feature_names_out.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -210794,7 +209966,7 @@ "qname": "sklearn.preprocessing._discretization.KBinsDiscretizer.get_feature_names_out.input_features", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of str or None, default=None", "description": "Input features.\n\n- If `input_features` is `None`, then `feature_names_in_` is\n used as feature names in. If `feature_names_in_` is not defined,\n then the following input feature names are generated:\n `[\"x0\", \"x1\", ..., \"x(n_features_in_ - 1)\"]`.\n- If `input_features` is an array-like, then `input_features` must\n match `feature_names_in_` if `feature_names_in_` is defined." @@ -210815,7 +209987,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Get output feature names.", "docstring": "Get output feature names.\n\n Parameters\n ----------\n input_features : array-like of str or None, default=None\n Input features.\n\n - If `input_features` is `None`, then `feature_names_in_` is\n used as feature names in. If `feature_names_in_` is not defined,\n then the following input feature names are generated:\n `[\"x0\", \"x1\", ..., \"x(n_features_in_ - 1)\"]`.\n - If `input_features` is an array-like, then `input_features` must\n match `feature_names_in_` if `feature_names_in_` is defined.\n\n Returns\n -------\n feature_names_out : ndarray of str objects\n Transformed feature names.\n " }, @@ -210831,7 +210003,7 @@ "qname": "sklearn.preprocessing._discretization.KBinsDiscretizer.inverse_transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -210844,7 +210016,7 @@ "qname": "sklearn.preprocessing._discretization.KBinsDiscretizer.inverse_transform.Xt", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Transformed data in the binned space." 
@@ -210856,7 +210028,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Transform discretized data back to original feature space.\n\nNote that this function does not regenerate the original data\ndue to discretization rounding.", "docstring": "\n Transform discretized data back to original feature space.\n\n Note that this function does not regenerate the original data\n due to discretization rounding.\n\n Parameters\n ----------\n Xt : array-like of shape (n_samples, n_features)\n Transformed data in the binned space.\n\n Returns\n -------\n Xinv : ndarray, dtype={np.float32, np.float64}\n Data in the original feature space.\n " }, @@ -210872,7 +210044,7 @@ "qname": "sklearn.preprocessing._discretization.KBinsDiscretizer.transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -210885,7 +210057,7 @@ "qname": "sklearn.preprocessing._discretization.KBinsDiscretizer.transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Data to be discretized." @@ -210897,7 +210069,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Discretize the data.", "docstring": "\n Discretize the data.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Data to be discretized.\n\n Returns\n -------\n Xt : {ndarray, sparse matrix}, dtype={np.float32, np.float64}\n Data in the binned space. Will be a sparse matrix if\n `self.encode='onehot'` and ndarray otherwise.\n " }, @@ -210913,7 +210085,7 @@ "qname": "sklearn.preprocessing._encoders.OneHotEncoder.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -210926,7 +210098,7 @@ "qname": "sklearn.preprocessing._encoders.OneHotEncoder.__init__.categories", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "'auto' or a list of array-like, default='auto'", "description": "Categories (unique values) per feature:\n\n- 'auto' : Determine categories automatically from the training data.\n- list : ``categories[i]`` holds the categories expected in the ith\n column. The passed categories should not mix strings and numeric\n values within a single feature, and should be sorted in case of\n numeric values.\n\nThe used categories can be found in the ``categories_`` attribute.\n\n.. versionadded:: 0.20" @@ -210951,7 +210123,7 @@ "qname": "sklearn.preprocessing._encoders.OneHotEncoder.__init__.drop", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'first', 'if_binary'} or an array-like of shape (n_features,), default=None", "description": "Specifies a methodology to use to drop one of the categories per\nfeature. This is useful in situations where perfectly collinear\nfeatures cause problems, such as when feeding the resulting data\ninto an unregularized linear regression model.\n\nHowever, dropping one category breaks the symmetry of the original\nrepresentation and can therefore induce a bias in downstream models,\nfor instance for penalized linear classification or regression models.\n\n- None : retain all features (the default).\n- 'first' : drop the first category in each feature. 
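The KBinsDiscretizer entries above cover n_bins, encode ('onehot', 'onehot-dense', 'ordinal'), strategy ('uniform', 'quantile', 'kmeans'), plus fit, transform, inverse_transform and get_feature_names_out. A small sketch of that API before the OneHotEncoder entries continue, assuming a scikit-learn release recent enough to ship get_feature_names_out (the hunks above document it); the data is made up:

import numpy as np
from sklearn.preprocessing import KBinsDiscretizer

X = np.array([[-3.0, 1.0], [0.0, 5.0], [6.0, 9.0], [9.0, 13.0]])

# 'ordinal' returns the bin identifier per feature as an integer value
est = KBinsDiscretizer(n_bins=3, encode="ordinal", strategy="uniform")
Xt = est.fit_transform(X)

# inverse_transform recovers bin centers only, not the original values,
# because of discretization rounding (as the docstring above notes)
Xinv = est.inverse_transform(Xt)

print(est.get_feature_names_out())  # output feature names, as documented above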
If only one\n category is present, the feature will be dropped entirely.\n- 'if_binary' : drop the first category in each feature with two\n categories. Features with 1 or more than 2 categories are\n left intact.\n- array : ``drop[i]`` is the category in feature ``X[:, i]`` that\n should be dropped.\n\n.. versionadded:: 0.21\n The parameter `drop` was added in 0.21.\n\n.. versionchanged:: 0.23\n The option `drop='if_binary'` was added in 0.23.\n\n.. versionchanged:: 1.1\n Support for dropping infrequent categories." @@ -210976,7 +210148,7 @@ "qname": "sklearn.preprocessing._encoders.OneHotEncoder.__init__.sparse", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Will return sparse matrix if set True else will return an array." @@ -210992,7 +210164,7 @@ "qname": "sklearn.preprocessing._encoders.OneHotEncoder.__init__.dtype", "default_value": "np.float64", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "number type, default=float", "description": "Desired dtype of output." @@ -211008,14 +210180,14 @@ "qname": "sklearn.preprocessing._encoders.OneHotEncoder.__init__.handle_unknown", "default_value": "'error'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'error', 'ignore', 'infrequent_if_exist'}, default='error'", "description": "Specifies the way unknown categories are handled during :meth:`transform`.\n\n- 'error' : Raise an error if an unknown category is present during transform.\n- 'ignore' : When an unknown category is encountered during\n transform, the resulting one-hot encoded columns for this feature\n will be all zeros. In the inverse transform, an unknown category\n will be denoted as None.\n- 'infrequent_if_exist' : When an unknown category is encountered\n during transform, the resulting one-hot encoded columns for this\n feature will map to the infrequent category if it exists. The\n infrequent category will be mapped to the last position in the\n encoding. During inverse transform, an unknown category will be\n mapped to the category denoted `'infrequent'` if it exists. If the\n `'infrequent'` category does not exist, then :meth:`transform` and\n :meth:`inverse_transform` will handle an unknown category as with\n `handle_unknown='ignore'`. Infrequent categories exist based on\n `min_frequency` and `max_categories`. Read more in the\n :ref:`User Guide `.\n\n.. versionchanged:: 1.1\n `'infrequent_if_exist'` was added to automatically handle unknown\n categories and infrequent categories." }, "type": { "kind": "EnumType", - "values": ["ignore", "infrequent_if_exist", "error"] + "values": ["ignore", "error", "infrequent_if_exist"] } }, { @@ -211024,7 +210196,7 @@ "qname": "sklearn.preprocessing._encoders.OneHotEncoder.__init__.min_frequency", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or float, default=None", "description": "Specifies the minimum frequency below which a category will be\nconsidered infrequent.\n\n- If `int`, categories with a smaller cardinality will be considered\n infrequent.\n\n- If `float`, categories with a smaller cardinality than\n `min_frequency * n_samples` will be considered infrequent.\n\n.. versionadded:: 1.1\n Read more in the :ref:`User Guide `." 
@@ -211049,7 +210221,7 @@ "qname": "sklearn.preprocessing._encoders.OneHotEncoder.__init__.max_categories", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Specifies an upper limit to the number of output features for each input\nfeature when considering infrequent categories. If there are infrequent\ncategories, `max_categories` includes the category representing the\ninfrequent categories along with the frequent categories. If `None`,\nthere is no limit to the number of output features.\n\n.. versionadded:: 1.1\n Read more in the :ref:`User Guide `." @@ -211061,7 +210233,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -211217,8 +210389,8 @@ ], "results": [], "is_public": false, - "description": "Fit infrequent categories.\n\nDefines the private attribute: `_default_to_infrequent_mappings`. For\nfeature `i`, `_default_to_infrequent_mappings[i]` defines the mapping\nfrom the integer encoding returned by `super().transform()` into\ninfrequent categories. If `_default_to_infrequent_mappings[i]` is None,\nthere were no infrequent categories in the training set.\n\nFor example if categories 0, 2 and 4 were frequent, while categories\n1, 3, 5 were infrequent for feature 7, then these categories are mapped\nto a single output:\n`_default_to_infrequent_mappings[7] = array([0, 3, 1, 3, 2, 3])`\n\nDefines private attribute: `_infrequent_indices`. `_infrequent_indices[i]`\nis an array of indices such that\n`categories_[i][_infrequent_indices[i]]` are all the infrequent category\nlabels. If the feature `i` has no infrequent categories\n`_infrequent_indices[i]` is None.\n\n.. versionadded:: 1.1", - "docstring": "Fit infrequent categories.\n\n Defines the private attribute: `_default_to_infrequent_mappings`. For\n feature `i`, `_default_to_infrequent_mappings[i]` defines the mapping\n from the integer encoding returned by `super().transform()` into\n infrequent categories. If `_default_to_infrequent_mappings[i]` is None,\n there were no infrequent categories in the training set.\n\n For example if categories 0, 2 and 4 were frequent, while categories\n 1, 3, 5 were infrequent for feature 7, then these categories are mapped\n to a single output:\n `_default_to_infrequent_mappings[7] = array([0, 3, 1, 3, 2, 3])`\n\n Defines private attribute: `_infrequent_indices`. `_infrequent_indices[i]`\n is an array of indices such that\n `categories_[i][_infrequent_indices[i]]` are all the infrequent category\n labels. If the feature `i` has no infrequent categories\n `_infrequent_indices[i]` is None.\n\n .. versionadded:: 1.1\n\n Parameters\n ----------\n n_samples : int\n Number of samples in training set.\n category_counts: list of ndarray\n `category_counts[i]` is the category counts corresponding to\n `self.categories_[i]`.\n " + "description": "Fit infrequent categories.\n\nDefines the private attribute: `_default_to_infrequent_mappings`. For\nfeature `i`, `_default_to_infrequent_mappings[i]` defines the mapping\nfrom the integer encoding returned by `super().transform()` into\ninfrequent categories. 
If `_default_to_infrequent_mappings[i]` is None,\nthere were no infrequent categories in the training set.\n\nFor example if categories 0, 2 and 4 were frequent, while categories\n1, 3, 5 were infrequent for feature 7, then these categories are mapped\nto a single output:\n`_default_to_infrequent_mappings[7] = array([0, 3, 1, 3, 2, 3])`\n\nDefines private attrite: `_infrequent_indices`. `_infrequent_indices[i]`\nis an array of indices such that\n`categories_[i][_infrequent_indices[i]]` are all the infrequent category\nlabels. If the feature `i` has no infrequent categories\n`_infrequent_indices[i]` is None.\n\n.. versionadded:: 1.1", + "docstring": "Fit infrequent categories.\n\n Defines the private attribute: `_default_to_infrequent_mappings`. For\n feature `i`, `_default_to_infrequent_mappings[i]` defines the mapping\n from the integer encoding returned by `super().transform()` into\n infrequent categories. If `_default_to_infrequent_mappings[i]` is None,\n there were no infrequent categories in the training set.\n\n For example if categories 0, 2 and 4 were frequent, while categories\n 1, 3, 5 were infrequent for feature 7, then these categories are mapped\n to a single output:\n `_default_to_infrequent_mappings[7] = array([0, 3, 1, 3, 2, 3])`\n\n Defines private attrite: `_infrequent_indices`. `_infrequent_indices[i]`\n is an array of indices such that\n `categories_[i][_infrequent_indices[i]]` are all the infrequent category\n labels. If the feature `i` has no infrequent categories\n `_infrequent_indices[i]` is None.\n\n .. versionadded:: 1.1\n\n Parameters\n ----------\n n_samples : int\n Number of samples in training set.\n category_counts: list of ndarray\n `category_counts[i]` is the category counts corresponding to\n `self.categories_[i]`.\n " }, { "id": "sklearn/sklearn.preprocessing._encoders/OneHotEncoder/_identify_infrequent", @@ -211483,7 +210655,7 @@ "qname": "sklearn.preprocessing._encoders.OneHotEncoder.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -211496,7 +210668,7 @@ "qname": "sklearn.preprocessing._encoders.OneHotEncoder.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "The data to determine the categories of each feature." @@ -211512,7 +210684,7 @@ "qname": "sklearn.preprocessing._encoders.OneHotEncoder.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "None", "description": "Ignored. This parameter exists only for compatibility with\n:class:`~sklearn.pipeline.Pipeline`." @@ -211524,7 +210696,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit OneHotEncoder to X.", "docstring": "\n Fit OneHotEncoder to X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The data to determine the categories of each feature.\n\n y : None\n Ignored. 
This parameter exists only for compatibility with\n :class:`~sklearn.pipeline.Pipeline`.\n\n Returns\n -------\n self\n Fitted encoder.\n " }, @@ -211540,7 +210712,7 @@ "qname": "sklearn.preprocessing._encoders.OneHotEncoder.fit_transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -211553,7 +210725,7 @@ "qname": "sklearn.preprocessing._encoders.OneHotEncoder.fit_transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "The data to encode." @@ -211569,7 +210741,7 @@ "qname": "sklearn.preprocessing._encoders.OneHotEncoder.fit_transform.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "None", "description": "Ignored. This parameter exists only for compatibility with\n:class:`~sklearn.pipeline.Pipeline`." @@ -211581,7 +210753,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit OneHotEncoder to X, then transform X.\n\nEquivalent to fit(X).transform(X) but more convenient.", "docstring": "\n Fit OneHotEncoder to X, then transform X.\n\n Equivalent to fit(X).transform(X) but more convenient.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The data to encode.\n\n y : None\n Ignored. This parameter exists only for compatibility with\n :class:`~sklearn.pipeline.Pipeline`.\n\n Returns\n -------\n X_out : {ndarray, sparse matrix} of shape (n_samples, n_encoded_features)\n Transformed input. If `sparse=True`, a sparse matrix will be\n returned.\n " }, @@ -211599,7 +210771,7 @@ "qname": "sklearn.preprocessing._encoders.OneHotEncoder.get_feature_names.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -211612,7 +210784,7 @@ "qname": "sklearn.preprocessing._encoders.OneHotEncoder.get_feature_names.input_features", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "list of str of shape (n_features,)", "description": "String names for input features if available. By default,\n\"x0\", \"x1\", ... \"xn_features\" is used." @@ -211624,7 +210796,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Return feature names for output features.\n\nFor a given input feature, if there is an infrequent category, the most\n'infrequent_sklearn' will be used as a feature name.", "docstring": "Return feature names for output features.\n\n For a given input feature, if there is an infrequent category, the most\n 'infrequent_sklearn' will be used as a feature name.\n\n Parameters\n ----------\n input_features : list of str of shape (n_features,)\n String names for input features if available. By default,\n \"x0\", \"x1\", ... 
\"xn_features\" is used.\n\n Returns\n -------\n output_feature_names : ndarray of shape (n_output_features,)\n Array of feature names.\n " }, @@ -211640,7 +210812,7 @@ "qname": "sklearn.preprocessing._encoders.OneHotEncoder.get_feature_names_out.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -211653,7 +210825,7 @@ "qname": "sklearn.preprocessing._encoders.OneHotEncoder.get_feature_names_out.input_features", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of str or None, default=None", "description": "Input features.\n\n- If `input_features` is `None`, then `feature_names_in_` is\n used as feature names in. If `feature_names_in_` is not defined,\n then the following input feature names are generated:\n `[\"x0\", \"x1\", ..., \"x(n_features_in_ - 1)\"]`.\n- If `input_features` is an array-like, then `input_features` must\n match `feature_names_in_` if `feature_names_in_` is defined." @@ -211674,7 +210846,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Get output feature names for transformation.", "docstring": "Get output feature names for transformation.\n\n Parameters\n ----------\n input_features : array-like of str or None, default=None\n Input features.\n\n - If `input_features` is `None`, then `feature_names_in_` is\n used as feature names in. If `feature_names_in_` is not defined,\n then the following input feature names are generated:\n `[\"x0\", \"x1\", ..., \"x(n_features_in_ - 1)\"]`.\n - If `input_features` is an array-like, then `input_features` must\n match `feature_names_in_` if `feature_names_in_` is defined.\n\n Returns\n -------\n feature_names_out : ndarray of str objects\n Transformed feature names.\n " }, @@ -211690,7 +210862,7 @@ "qname": "sklearn.preprocessing._encoders.OneHotEncoder.infrequent_categories_.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -211699,7 +210871,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Infrequent categories for each feature.", "docstring": "Infrequent categories for each feature." }, @@ -211715,7 +210887,7 @@ "qname": "sklearn.preprocessing._encoders.OneHotEncoder.inverse_transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -211728,7 +210900,7 @@ "qname": "sklearn.preprocessing._encoders.OneHotEncoder.inverse_transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_encoded_features)", "description": "The transformed data." @@ -211749,7 +210921,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Convert the data back to the original representation.\n\nWhen unknown categories are encountered (all zeros in the\none-hot encoding), ``None`` is used to represent this category. 
If the\nfeature with the unknown category has a dropped category, the dropped\ncategory will be its inverse.\n\nFor a given input feature, if there is an infrequent category,\n'infrequent_sklearn' will be used to represent the infrequent category.", "docstring": "\n Convert the data back to the original representation.\n\n When unknown categories are encountered (all zeros in the\n one-hot encoding), ``None`` is used to represent this category. If the\n feature with the unknown category has a dropped category, the dropped\n category will be its inverse.\n\n For a given input feature, if there is an infrequent category,\n 'infrequent_sklearn' will be used to represent the infrequent category.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_encoded_features)\n The transformed data.\n\n Returns\n -------\n X_tr : ndarray of shape (n_samples, n_features)\n Inverse transformed array.\n " }, @@ -211765,7 +210937,7 @@ "qname": "sklearn.preprocessing._encoders.OneHotEncoder.transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -211778,7 +210950,7 @@ "qname": "sklearn.preprocessing._encoders.OneHotEncoder.transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "The data to encode." @@ -211790,7 +210962,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Transform X using one-hot encoding.\n\nIf there are infrequent categories for a feature, the infrequent\ncategories will be grouped into a single category.", "docstring": "\n Transform X using one-hot encoding.\n\n If there are infrequent categories for a feature, the infrequent\n categories will be grouped into a single category.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The data to encode.\n\n Returns\n -------\n X_out : {ndarray, sparse matrix} of shape (n_samples, n_encoded_features)\n Transformed input. If `sparse=True`, a sparse matrix will be\n returned.\n " }, @@ -211806,7 +210978,7 @@ "qname": "sklearn.preprocessing._encoders.OrdinalEncoder.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -211819,7 +210991,7 @@ "qname": "sklearn.preprocessing._encoders.OrdinalEncoder.__init__.categories", "default_value": "'auto'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "'auto' or a list of array-like, default='auto'", "description": "Categories (unique values) per feature:\n\n- 'auto' : Determine categories automatically from the training data.\n- list : ``categories[i]`` holds the categories expected in the ith\n column. The passed categories should not mix strings and numeric\n values, and should be sorted in case of numeric values.\n\nThe used categories can be found in the ``categories_`` attribute." @@ -211844,7 +211016,7 @@ "qname": "sklearn.preprocessing._encoders.OrdinalEncoder.__init__.dtype", "default_value": "np.float64", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "number type, default np.float64", "description": "Desired dtype of output." 
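The OneHotEncoder entries above document handle_unknown ('error', 'ignore', 'infrequent_if_exist'), min_frequency, max_categories, drop, and the infrequent-category grouping described in _fit_infrequent_category_mapping. A hedged sketch of how those options interact, assuming scikit-learn >= 1.1 (the version the infrequent-category notes cite); the categories and counts are invented:

import numpy as np
from sklearn.preprocessing import OneHotEncoder

X = np.array([["dog"], ["dog"], ["dog"], ["cat"], ["cat"], ["snake"]])

# categories seen fewer than min_frequency times are grouped into a single
# infrequent category, mapped to the last position in the encoding
enc = OneHotEncoder(handle_unknown="infrequent_if_exist", min_frequency=2)
enc.fit(X)

print(enc.categories_)              # e.g. [array(['cat', 'dog', 'snake'], dtype=object)]
print(enc.infrequent_categories_)   # e.g. [array(['snake'], dtype=object)]
print(enc.get_feature_names_out())  # e.g. ['x0_cat', 'x0_dog', 'x0_infrequent_sklearn']

# an unseen category at transform time maps to the infrequent column
Xt = enc.transform([["lizard"]]).toarray()

With handle_unknown='infrequent_if_exist', the unseen "lizard" row lands in the infrequent column rather than raising an error, matching the behaviour the docstring above describes.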
@@ -211869,14 +211041,14 @@ "qname": "sklearn.preprocessing._encoders.OrdinalEncoder.__init__.handle_unknown", "default_value": "'error'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'error', 'use_encoded_value'}, default='error'", "description": "When set to 'error' an error will be raised in case an unknown\ncategorical feature is present during transform. When set to\n'use_encoded_value', the encoded value of unknown categories will be\nset to the value given for the parameter `unknown_value`. In\n:meth:`inverse_transform`, an unknown category will be denoted as None.\n\n.. versionadded:: 0.24" }, "type": { "kind": "EnumType", - "values": ["use_encoded_value", "error"] + "values": ["error", "use_encoded_value"] } }, { @@ -211885,7 +211057,7 @@ "qname": "sklearn.preprocessing._encoders.OrdinalEncoder.__init__.unknown_value", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or np.nan, default=None", "description": "When the parameter handle_unknown is set to 'use_encoded_value', this\nparameter is required and will set the encoded value of unknown\ncategories. It has to be distinct from the values used to encode any of\nthe categories in `fit`. If set to np.nan, the `dtype` parameter must\nbe a float dtype.\n\n.. versionadded:: 0.24" @@ -211910,7 +211082,7 @@ "qname": "sklearn.preprocessing._encoders.OrdinalEncoder.__init__.encoded_missing_value", "default_value": "np.nan", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or np.nan, default=np.nan", "description": "Encoded value of missing categories. If set to `np.nan`, then the `dtype`\nparameter must be a float dtype.\n\n.. versionadded:: 1.1" @@ -211931,7 +211103,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -211947,7 +211119,7 @@ "qname": "sklearn.preprocessing._encoders.OrdinalEncoder.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -211960,7 +211132,7 @@ "qname": "sklearn.preprocessing._encoders.OrdinalEncoder.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "The data to determine the categories of each feature." @@ -211976,7 +211148,7 @@ "qname": "sklearn.preprocessing._encoders.OrdinalEncoder.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "None", "description": "Ignored. This parameter exists only for compatibility with\n:class:`~sklearn.pipeline.Pipeline`." @@ -211988,7 +211160,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the OrdinalEncoder to X.", "docstring": "\n Fit the OrdinalEncoder to X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The data to determine the categories of each feature.\n\n y : None\n Ignored. 
This parameter exists only for compatibility with\n :class:`~sklearn.pipeline.Pipeline`.\n\n Returns\n -------\n self : object\n Fitted encoder.\n " }, @@ -212004,7 +211176,7 @@ "qname": "sklearn.preprocessing._encoders.OrdinalEncoder.inverse_transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -212017,7 +211189,7 @@ "qname": "sklearn.preprocessing._encoders.OrdinalEncoder.inverse_transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_encoded_features)", "description": "The transformed data." @@ -212029,7 +211201,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Convert the data back to the original representation.", "docstring": "\n Convert the data back to the original representation.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_encoded_features)\n The transformed data.\n\n Returns\n -------\n X_tr : ndarray of shape (n_samples, n_features)\n Inverse transformed array.\n " }, @@ -212045,7 +211217,7 @@ "qname": "sklearn.preprocessing._encoders.OrdinalEncoder.transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -212058,7 +211230,7 @@ "qname": "sklearn.preprocessing._encoders.OrdinalEncoder.transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "The data to encode." @@ -212070,7 +211242,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Transform X to ordinal codes.", "docstring": "\n Transform X to ordinal codes.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The data to encode.\n\n Returns\n -------\n X_out : ndarray of shape (n_samples, n_features)\n Transformed input.\n " }, @@ -212367,7 +211539,7 @@ "qname": "sklearn.preprocessing._function_transformer.FunctionTransformer.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -212380,7 +211552,7 @@ "qname": "sklearn.preprocessing._function_transformer.FunctionTransformer.__init__.func", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "callable, default=None", "description": "The callable to use for the transformation. This will be passed\nthe same arguments as transform, with args and kwargs forwarded.\nIf func is None, then func will be the identity function." @@ -212396,7 +211568,7 @@ "qname": "sklearn.preprocessing._function_transformer.FunctionTransformer.__init__.inverse_func", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "callable, default=None", "description": "The callable to use for the inverse transformation. This will be\npassed the same arguments as inverse transform, with args and\nkwargs forwarded. If inverse_func is None, then inverse_func\nwill be the identity function." 
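The OrdinalEncoder entries above describe categories, dtype, handle_unknown='use_encoded_value' with unknown_value, and inverse_transform. A small sketch, assuming scikit-learn >= 0.24 (when those options were added, per the versionadded notes above); the data and the -1 sentinel are illustrative choices:

import numpy as np
from sklearn.preprocessing import OrdinalEncoder

X = np.array([["low"], ["medium"], ["high"], ["medium"]])

# unknown categories seen at transform time are encoded as unknown_value (-1 here)
enc = OrdinalEncoder(
    categories=[["low", "medium", "high"]],  # explicit ordering instead of 'auto'
    handle_unknown="use_encoded_value",
    unknown_value=-1,
)
enc.fit(X)

codes = enc.transform([["medium"], ["unexpected"]])   # [[1.], [-1.]]
original = enc.inverse_transform([[2], [0]])          # [['high'], ['low']]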
@@ -212412,7 +211584,7 @@ "qname": "sklearn.preprocessing._function_transformer.FunctionTransformer.__init__.validate", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Indicate that the input X array should be checked before calling\n``func``. The possibilities are:\n\n- If False, there is no input validation.\n- If True, then X will be converted to a 2-dimensional NumPy array or\n sparse matrix. If the conversion is not possible an exception is\n raised.\n\n.. versionchanged:: 0.22\n The default of ``validate`` changed from True to False." @@ -212428,7 +211600,7 @@ "qname": "sklearn.preprocessing._function_transformer.FunctionTransformer.__init__.accept_sparse", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Indicate that func accepts a sparse matrix as input. If validate is\nFalse, this has no effect. Otherwise, if accept_sparse is false,\nsparse matrix inputs will cause an exception to be raised." @@ -212444,7 +211616,7 @@ "qname": "sklearn.preprocessing._function_transformer.FunctionTransformer.__init__.check_inverse", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether to check that or ``func`` followed by ``inverse_func`` leads to\nthe original inputs. It can be used for a sanity check, raising a\nwarning when the condition is not fulfilled.\n\n.. versionadded:: 0.20" @@ -212460,7 +211632,7 @@ "qname": "sklearn.preprocessing._function_transformer.FunctionTransformer.__init__.feature_names_out", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "callable, 'one-to-one' or None, default=None", "description": "Determines the list of feature names that will be returned by the\n`get_feature_names_out` method. If it is 'one-to-one', then the output\nfeature names will be equal to the input feature names. If it is a\ncallable, then it must take two positional arguments: this\n`FunctionTransformer` (`self`) and an array-like of input feature names\n(`input_features`). It must return an array-like of output feature\nnames. The `get_feature_names_out` method is only defined if\n`feature_names_out` is not None.\n\nSee ``get_feature_names_out`` for more details.\n\n.. versionadded:: 1.1" @@ -212489,7 +211661,7 @@ "qname": "sklearn.preprocessing._function_transformer.FunctionTransformer.__init__.kw_args", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "dict, default=None", "description": "Dictionary of additional keyword arguments to pass to func.\n\n.. versionadded:: 0.18" @@ -212505,7 +211677,7 @@ "qname": "sklearn.preprocessing._function_transformer.FunctionTransformer.__init__.inv_kw_args", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "dict, default=None", "description": "Dictionary of additional keyword arguments to pass to inverse_func.\n\n.. 
versionadded:: 0.18" @@ -212517,7 +211689,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -212533,7 +211705,7 @@ "qname": "sklearn.preprocessing._function_transformer.FunctionTransformer.__sklearn_is_fitted__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -212542,7 +211714,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Return True since FunctionTransfomer is stateless.", "docstring": "Return True since FunctionTransfomer is stateless." }, @@ -212736,7 +211908,7 @@ "qname": "sklearn.preprocessing._function_transformer.FunctionTransformer.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -212749,7 +211921,7 @@ "qname": "sklearn.preprocessing._function_transformer.FunctionTransformer.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like, shape (n_samples, n_features)", "description": "Input array." @@ -212774,7 +211946,7 @@ "qname": "sklearn.preprocessing._function_transformer.FunctionTransformer.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present here for API consistency by convention." @@ -212786,7 +211958,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit transformer by checking X.\n\nIf ``validate`` is ``True``, ``X`` will be checked.", "docstring": "Fit transformer by checking X.\n\n If ``validate`` is ``True``, ``X`` will be checked.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n Input array.\n\n y : Ignored\n Not used, present here for API consistency by convention.\n\n Returns\n -------\n self : object\n FunctionTransformer class instance.\n " }, @@ -212802,7 +211974,7 @@ "qname": "sklearn.preprocessing._function_transformer.FunctionTransformer.get_feature_names_out.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -212815,7 +211987,7 @@ "qname": "sklearn.preprocessing._function_transformer.FunctionTransformer.get_feature_names_out.input_features", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of str or None, default=None", "description": "Input feature names.\n\n- If `input_features` is None, then `feature_names_in_` is\n used as the input feature names. If `feature_names_in_` is not\n defined, then names are generated:\n `[x0, x1, ..., x(n_features_in_ - 1)]`.\n- If `input_features` is array-like, then `input_features` must\n match `feature_names_in_` if `feature_names_in_` is defined." 
@@ -212836,7 +212008,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Get output feature names for transformation.\n\nThis method is only defined if `feature_names_out` is not None.", "docstring": "Get output feature names for transformation.\n\n This method is only defined if `feature_names_out` is not None.\n\n Parameters\n ----------\n input_features : array-like of str or None, default=None\n Input feature names.\n\n - If `input_features` is None, then `feature_names_in_` is\n used as the input feature names. If `feature_names_in_` is not\n defined, then names are generated:\n `[x0, x1, ..., x(n_features_in_ - 1)]`.\n - If `input_features` is array-like, then `input_features` must\n match `feature_names_in_` if `feature_names_in_` is defined.\n\n Returns\n -------\n feature_names_out : ndarray of str objects\n Transformed feature names.\n\n - If `feature_names_out` is 'one-to-one', the input feature names\n are returned (see `input_features` above). This requires\n `feature_names_in_` and/or `n_features_in_` to be defined, which\n is done automatically if `validate=True`. Alternatively, you can\n set them in `func`.\n - If `feature_names_out` is a callable, then it is called with two\n arguments, `self` and `input_features`, and its return value is\n returned by this method.\n " }, @@ -212852,7 +212024,7 @@ "qname": "sklearn.preprocessing._function_transformer.FunctionTransformer.inverse_transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -212865,7 +212037,7 @@ "qname": "sklearn.preprocessing._function_transformer.FunctionTransformer.inverse_transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like, shape (n_samples, n_features)", "description": "Input array." @@ -212886,7 +212058,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Transform X using the inverse function.", "docstring": "Transform X using the inverse function.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n Input array.\n\n Returns\n -------\n X_out : array-like, shape (n_samples, n_features)\n Transformed input.\n " }, @@ -212902,7 +212074,7 @@ "qname": "sklearn.preprocessing._function_transformer.FunctionTransformer.transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -212915,7 +212087,7 @@ "qname": "sklearn.preprocessing._function_transformer.FunctionTransformer.transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like, shape (n_samples, n_features)", "description": "Input array." 
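The FunctionTransformer entries above cover func, inverse_func, validate, accept_sparse, check_inverse, feature_names_out, kw_args and inv_kw_args. A minimal sketch, assuming scikit-learn >= 1.1 for feature_names_out (per the versionadded note above); np.log1p and np.expm1 are arbitrary example callables:

import numpy as np
from sklearn.preprocessing import FunctionTransformer

# check_inverse verifies on a subsample that inverse_func(func(X)) matches X
log_tf = FunctionTransformer(
    func=np.log1p,
    inverse_func=np.expm1,
    validate=True,            # convert X to a 2-D array before calling func
    check_inverse=True,
    feature_names_out="one-to-one",
)

X = np.array([[0.0, 1.0], [2.0, 3.0]])
Xt = log_tf.fit_transform(X)
X_back = log_tf.inverse_transform(Xt)

print(log_tf.get_feature_names_out(["a", "b"]))  # ['a', 'b'] with 'one-to-one'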
@@ -212936,7 +212108,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Transform X using the forward function.", "docstring": "Transform X using the forward function.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n Input array.\n\n Returns\n -------\n X_out : array-like, shape (n_samples, n_features)\n Transformed input.\n " }, @@ -212977,7 +212149,7 @@ "qname": "sklearn.preprocessing._label.LabelBinarizer.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -212990,7 +212162,7 @@ "qname": "sklearn.preprocessing._label.LabelBinarizer.__init__.neg_label", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=0", "description": "Value with which negative labels must be encoded." @@ -213006,7 +212178,7 @@ "qname": "sklearn.preprocessing._label.LabelBinarizer.__init__.pos_label", "default_value": "1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=1", "description": "Value with which positive labels must be encoded." @@ -213022,7 +212194,7 @@ "qname": "sklearn.preprocessing._label.LabelBinarizer.__init__.sparse_output", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "True if the returned array from transform is desired to be in sparse\nCSR format." @@ -213034,7 +212206,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -213075,7 +212247,7 @@ "qname": "sklearn.preprocessing._label.LabelBinarizer.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -213088,7 +212260,7 @@ "qname": "sklearn.preprocessing._label.LabelBinarizer.fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray of shape (n_samples,) or (n_samples, n_classes)", "description": "Target values. The 2-d matrix should only contain 0 and 1,\nrepresents multilabel classification." @@ -213100,7 +212272,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit label binarizer.", "docstring": "Fit label binarizer.\n\n Parameters\n ----------\n y : ndarray of shape (n_samples,) or (n_samples, n_classes)\n Target values. The 2-d matrix should only contain 0 and 1,\n represents multilabel classification.\n\n Returns\n -------\n self : object\n Returns the instance itself.\n " }, @@ -213116,7 +212288,7 @@ "qname": "sklearn.preprocessing._label.LabelBinarizer.fit_transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -213129,7 +212301,7 @@ "qname": "sklearn.preprocessing._label.LabelBinarizer.fit_transform.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{ndarray, sparse matrix} of shape (n_samples,) or (n_samples, n_classes)", "description": "Target values. The 2-d matrix should only contain 0 and 1,\nrepresents multilabel classification. Sparse matrix can be\nCSR, CSC, COO, DOK, or LIL." 
@@ -213150,7 +212322,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit label binarizer/transform multi-class labels to binary labels.\n\nThe output of transform is sometimes referred to as\nthe 1-of-K coding scheme.", "docstring": "Fit label binarizer/transform multi-class labels to binary labels.\n\n The output of transform is sometimes referred to as\n the 1-of-K coding scheme.\n\n Parameters\n ----------\n y : {ndarray, sparse matrix} of shape (n_samples,) or (n_samples, n_classes)\n Target values. The 2-d matrix should only contain 0 and 1,\n represents multilabel classification. Sparse matrix can be\n CSR, CSC, COO, DOK, or LIL.\n\n Returns\n -------\n Y : {ndarray, sparse matrix} of shape (n_samples, n_classes)\n Shape will be (n_samples, 1) for binary problems. Sparse matrix\n will be of CSR format.\n " }, @@ -213166,7 +212338,7 @@ "qname": "sklearn.preprocessing._label.LabelBinarizer.inverse_transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -213179,7 +212351,7 @@ "qname": "sklearn.preprocessing._label.LabelBinarizer.inverse_transform.Y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{ndarray, sparse matrix} of shape (n_samples, n_classes)", "description": "Target values. All sparse matrices are converted to CSR before\ninverse transformation." @@ -213204,7 +212376,7 @@ "qname": "sklearn.preprocessing._label.LabelBinarizer.inverse_transform.threshold", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=None", "description": "Threshold used in the binary and multi-label cases.\n\nUse 0 when ``Y`` contains the output of decision_function\n(classifier).\nUse 0.5 when ``Y`` contains the output of predict_proba.\n\nIf None, the threshold is assumed to be half way between\nneg_label and pos_label." @@ -213216,7 +212388,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Transform binary labels back to multi-class labels.", "docstring": "Transform binary labels back to multi-class labels.\n\n Parameters\n ----------\n Y : {ndarray, sparse matrix} of shape (n_samples, n_classes)\n Target values. All sparse matrices are converted to CSR before\n inverse transformation.\n\n threshold : float, default=None\n Threshold used in the binary and multi-label cases.\n\n Use 0 when ``Y`` contains the output of decision_function\n (classifier).\n Use 0.5 when ``Y`` contains the output of predict_proba.\n\n If None, the threshold is assumed to be half way between\n neg_label and pos_label.\n\n Returns\n -------\n y : {ndarray, sparse matrix} of shape (n_samples,)\n Target values. Sparse matrix will be of CSR format.\n\n Notes\n -----\n In the case when the binary labels are fractional\n (probabilistic), inverse_transform chooses the class with the\n greatest value. 
Typically, this allows to use the output of a\n linear model's decision_function method directly as the input\n of inverse_transform.\n " }, @@ -213232,7 +212404,7 @@ "qname": "sklearn.preprocessing._label.LabelBinarizer.transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -213245,7 +212417,7 @@ "qname": "sklearn.preprocessing._label.LabelBinarizer.transform.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array, sparse matrix} of shape (n_samples,) or (n_samples, n_classes)", "description": "Target values. The 2-d matrix should only contain 0 and 1,\nrepresents multilabel classification. Sparse matrix can be\nCSR, CSC, COO, DOK, or LIL." @@ -213266,7 +212438,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Transform multi-class labels to binary labels.\n\nThe output of transform is sometimes referred to by some authors as\nthe 1-of-K coding scheme.", "docstring": "Transform multi-class labels to binary labels.\n\n The output of transform is sometimes referred to by some authors as\n the 1-of-K coding scheme.\n\n Parameters\n ----------\n y : {array, sparse matrix} of shape (n_samples,) or (n_samples, n_classes)\n Target values. The 2-d matrix should only contain 0 and 1,\n represents multilabel classification. Sparse matrix can be\n CSR, CSC, COO, DOK, or LIL.\n\n Returns\n -------\n Y : {ndarray, sparse matrix} of shape (n_samples, n_classes)\n Shape will be (n_samples, 1) for binary problems. Sparse matrix\n will be of CSR format.\n " }, @@ -213307,7 +212479,7 @@ "qname": "sklearn.preprocessing._label.LabelEncoder.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -213320,7 +212492,7 @@ "qname": "sklearn.preprocessing._label.LabelEncoder.fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "Target values." @@ -213332,7 +212504,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit label encoder.", "docstring": "Fit label encoder.\n\n Parameters\n ----------\n y : array-like of shape (n_samples,)\n Target values.\n\n Returns\n -------\n self : returns an instance of self.\n Fitted label encoder.\n " }, @@ -213348,7 +212520,7 @@ "qname": "sklearn.preprocessing._label.LabelEncoder.fit_transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -213361,7 +212533,7 @@ "qname": "sklearn.preprocessing._label.LabelEncoder.fit_transform.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "Target values." 
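The LabelBinarizer entries above describe neg_label, pos_label, sparse_output and the 1-of-K fit/transform/inverse_transform cycle, including the threshold used when inverting classifier scores. A short illustrative sketch; the labels are invented:

import numpy as np
from sklearn.preprocessing import LabelBinarizer

y = np.array(["yes", "no", "no", "yes", "maybe"])

lb = LabelBinarizer(neg_label=0, pos_label=1)
Y = lb.fit_transform(y)        # one indicator column per class (1-of-K coding)
print(lb.classes_)             # ['maybe' 'no' 'yes']

# inverse_transform picks the class with the largest value per row;
# threshold only matters for decision_function / predict_proba style scores
y_back = lb.inverse_transform(Y)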
@@ -213373,7 +212545,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit label encoder and return encoded labels.", "docstring": "Fit label encoder and return encoded labels.\n\n Parameters\n ----------\n y : array-like of shape (n_samples,)\n Target values.\n\n Returns\n -------\n y : array-like of shape (n_samples,)\n Encoded labels.\n " }, @@ -213389,7 +212561,7 @@ "qname": "sklearn.preprocessing._label.LabelEncoder.inverse_transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -213402,7 +212574,7 @@ "qname": "sklearn.preprocessing._label.LabelEncoder.inverse_transform.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "ndarray of shape (n_samples,)", "description": "Target values." @@ -213414,7 +212586,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Transform labels back to original encoding.", "docstring": "Transform labels back to original encoding.\n\n Parameters\n ----------\n y : ndarray of shape (n_samples,)\n Target values.\n\n Returns\n -------\n y : ndarray of shape (n_samples,)\n Original encoding.\n " }, @@ -213430,7 +212602,7 @@ "qname": "sklearn.preprocessing._label.LabelEncoder.transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -213443,7 +212615,7 @@ "qname": "sklearn.preprocessing._label.LabelEncoder.transform.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "Target values." @@ -213455,7 +212627,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Transform labels to normalized encoding.", "docstring": "Transform labels to normalized encoding.\n\n Parameters\n ----------\n y : array-like of shape (n_samples,)\n Target values.\n\n Returns\n -------\n y : array-like of shape (n_samples,)\n Labels as normalized encodings.\n " }, @@ -213471,7 +212643,7 @@ "qname": "sklearn.preprocessing._label.MultiLabelBinarizer.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -213484,7 +212656,7 @@ "qname": "sklearn.preprocessing._label.MultiLabelBinarizer.__init__.classes", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_classes,), default=None", "description": "Indicates an ordering for the class labels.\nAll entries should be unique (cannot contain duplicate classes)." @@ -213500,7 +212672,7 @@ "qname": "sklearn.preprocessing._label.MultiLabelBinarizer.__init__.sparse_output", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Set to True if output binary array is desired in CSR sparse format." 
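For reference, a minimal sketch of the LabelEncoder fit/transform/inverse_transform cycle documented in the entries above (illustrative only, not part of the annotation data):

    # LabelEncoder: map string labels to integer codes and back.
    from sklearn.preprocessing import LabelEncoder

    le = LabelEncoder()
    y = le.fit_transform(["paris", "tokyo", "paris", "amsterdam"])
    print(le.classes_)              # ['amsterdam' 'paris' 'tokyo']
    print(y)                        # [1 2 1 0]
    print(le.inverse_transform(y))  # back to the original city names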
@@ -213512,7 +212684,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -213635,7 +212807,7 @@ "qname": "sklearn.preprocessing._label.MultiLabelBinarizer.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -213648,7 +212820,7 @@ "qname": "sklearn.preprocessing._label.MultiLabelBinarizer.fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "iterable of iterables", "description": "A set of labels (any orderable and hashable object) for each\nsample. If the `classes` parameter is set, `y` will not be\niterated." @@ -213660,7 +212832,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the label sets binarizer, storing :term:`classes_`.", "docstring": "Fit the label sets binarizer, storing :term:`classes_`.\n\n Parameters\n ----------\n y : iterable of iterables\n A set of labels (any orderable and hashable object) for each\n sample. If the `classes` parameter is set, `y` will not be\n iterated.\n\n Returns\n -------\n self : object\n Fitted estimator.\n " }, @@ -213676,7 +212848,7 @@ "qname": "sklearn.preprocessing._label.MultiLabelBinarizer.fit_transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -213689,7 +212861,7 @@ "qname": "sklearn.preprocessing._label.MultiLabelBinarizer.fit_transform.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "iterable of iterables", "description": "A set of labels (any orderable and hashable object) for each\nsample. If the `classes` parameter is set, `y` will not be\niterated." @@ -213701,7 +212873,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the label sets binarizer and transform the given label sets.", "docstring": "Fit the label sets binarizer and transform the given label sets.\n\n Parameters\n ----------\n y : iterable of iterables\n A set of labels (any orderable and hashable object) for each\n sample. If the `classes` parameter is set, `y` will not be\n iterated.\n\n Returns\n -------\n y_indicator : {ndarray, sparse matrix} of shape (n_samples, n_classes)\n A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]`\n is in `y[i]`, and 0 otherwise. Sparse matrix will be of CSR\n format.\n " }, @@ -213717,7 +212889,7 @@ "qname": "sklearn.preprocessing._label.MultiLabelBinarizer.inverse_transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -213730,7 +212902,7 @@ "qname": "sklearn.preprocessing._label.MultiLabelBinarizer.inverse_transform.yt", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{ndarray, sparse matrix} of shape (n_samples, n_classes)", "description": "A matrix containing only 1s ands 0s." 
@@ -213751,7 +212923,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Transform the given indicator matrix into label sets.", "docstring": "Transform the given indicator matrix into label sets.\n\n Parameters\n ----------\n yt : {ndarray, sparse matrix} of shape (n_samples, n_classes)\n A matrix containing only 1s ands 0s.\n\n Returns\n -------\n y : list of tuples\n The set of labels for each sample such that `y[i]` consists of\n `classes_[j]` for each `yt[i, j] == 1`.\n " }, @@ -213767,7 +212939,7 @@ "qname": "sklearn.preprocessing._label.MultiLabelBinarizer.transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -213780,7 +212952,7 @@ "qname": "sklearn.preprocessing._label.MultiLabelBinarizer.transform.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "iterable of iterables", "description": "A set of labels (any orderable and hashable object) for each\nsample. If the `classes` parameter is set, `y` will not be\niterated." @@ -213792,7 +212964,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Transform the given label sets.", "docstring": "Transform the given label sets.\n\n Parameters\n ----------\n y : iterable of iterables\n A set of labels (any orderable and hashable object) for each\n sample. If the `classes` parameter is set, `y` will not be\n iterated.\n\n Returns\n -------\n y_indicator : array or CSR matrix, shape (n_samples, n_classes)\n A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in\n `y[i]`, and 0 otherwise.\n " }, @@ -214002,7 +213174,7 @@ "qname": "sklearn.preprocessing._polynomial.PolynomialFeatures.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -214015,7 +213187,7 @@ "qname": "sklearn.preprocessing._polynomial.PolynomialFeatures.__init__.degree", "default_value": "2", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int or tuple (min_degree, max_degree), default=2", "description": "If a single int is given, it specifies the maximal degree of the\npolynomial features. If a tuple `(min_degree, max_degree)` is passed,\nthen `min_degree` is the minimum and `max_degree` is the maximum\npolynomial degree of the generated features. Note that `min_degree=0`\nand `min_degree=1` are equivalent as outputting the degree zero term is\ndetermined by `include_bias`." @@ -214040,7 +213212,7 @@ "qname": "sklearn.preprocessing._polynomial.PolynomialFeatures.__init__.interaction_only", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "If `True`, only interaction features are produced: features that are\nproducts of at most `degree` *distinct* input features, i.e. terms with\npower of 2 or higher of the same input feature are excluded:\n\n - included: `x[0]`, `x[1]`, `x[0] * x[1]`, etc.\n - excluded: `x[0] ** 2`, `x[0] ** 2 * x[1]`, etc." 
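For reference, a minimal sketch of the MultiLabelBinarizer API described above, operating on iterables of label sets (illustrative only, not part of the annotation data):

    # MultiLabelBinarizer: indicator matrix from label sets and back.
    from sklearn.preprocessing import MultiLabelBinarizer

    mlb = MultiLabelBinarizer()
    Y = mlb.fit_transform([{"sci-fi", "thriller"}, {"comedy"}])
    print(mlb.classes_)              # ['comedy' 'sci-fi' 'thriller']
    print(Y)                         # [[0 1 1]
                                     #  [1 0 0]]
    print(mlb.inverse_transform(Y))  # [('sci-fi', 'thriller'), ('comedy',)]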
@@ -214056,7 +213228,7 @@ "qname": "sklearn.preprocessing._polynomial.PolynomialFeatures.__init__.include_bias", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "If `True` (default), then include a bias column, the feature in which\nall polynomial powers are zero (i.e. a column of ones - acts as an\nintercept term in a linear model)." @@ -214072,19 +213244,19 @@ "qname": "sklearn.preprocessing._polynomial.PolynomialFeatures.__init__.order", "default_value": "'C'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'C', 'F'}, default='C'", "description": "Order of output array in the dense case. `'F'` order is faster to\ncompute, but may slow down subsequent estimators.\n\n.. versionadded:: 0.21" }, "type": { "kind": "EnumType", - "values": ["C", "F"] + "values": ["F", "C"] } } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -214254,7 +213426,7 @@ "qname": "sklearn.preprocessing._polynomial.PolynomialFeatures.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -214267,7 +213439,7 @@ "qname": "sklearn.preprocessing._polynomial.PolynomialFeatures.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The data." @@ -214292,7 +213464,7 @@ "qname": "sklearn.preprocessing._polynomial.PolynomialFeatures.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present here for API consistency by convention." @@ -214304,7 +213476,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Compute number of output features.", "docstring": "\n Compute number of output features.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The data.\n\n y : Ignored\n Not used, present here for API consistency by convention.\n\n Returns\n -------\n self : object\n Fitted transformer.\n " }, @@ -214322,7 +213494,7 @@ "qname": "sklearn.preprocessing._polynomial.PolynomialFeatures.get_feature_names.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -214335,7 +213507,7 @@ "qname": "sklearn.preprocessing._polynomial.PolynomialFeatures.get_feature_names.input_features", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "list of str of shape (n_features,), default=None", "description": "String names for input features if available. By default,\n\"x0\", \"x1\", ... \"xn_features\" is used." @@ -214347,7 +213519,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Return feature names for output features.", "docstring": "Return feature names for output features.\n\n Parameters\n ----------\n input_features : list of str of shape (n_features,), default=None\n String names for input features if available. By default,\n \"x0\", \"x1\", ... 
\"xn_features\" is used.\n\n Returns\n -------\n output_feature_names : list of str of shape (n_output_features,)\n Transformed feature names.\n " }, @@ -214363,7 +213535,7 @@ "qname": "sklearn.preprocessing._polynomial.PolynomialFeatures.get_feature_names_out.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -214376,7 +213548,7 @@ "qname": "sklearn.preprocessing._polynomial.PolynomialFeatures.get_feature_names_out.input_features", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of str or None, default=None", "description": "Input features.\n\n- If `input_features is None`, then `feature_names_in_` is\n used as feature names in. If `feature_names_in_` is not defined,\n then the following input feature names are generated:\n `[\"x0\", \"x1\", ..., \"x(n_features_in_ - 1)\"]`.\n- If `input_features` is an array-like, then `input_features` must\n match `feature_names_in_` if `feature_names_in_` is defined." @@ -214397,7 +213569,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Get output feature names for transformation.", "docstring": "Get output feature names for transformation.\n\n Parameters\n ----------\n input_features : array-like of str or None, default=None\n Input features.\n\n - If `input_features is None`, then `feature_names_in_` is\n used as feature names in. If `feature_names_in_` is not defined,\n then the following input feature names are generated:\n `[\"x0\", \"x1\", ..., \"x(n_features_in_ - 1)\"]`.\n - If `input_features` is an array-like, then `input_features` must\n match `feature_names_in_` if `feature_names_in_` is defined.\n\n Returns\n -------\n feature_names_out : ndarray of str objects\n Transformed feature names.\n " }, @@ -214416,7 +213588,7 @@ "qname": "sklearn.preprocessing._polynomial.PolynomialFeatures.n_input_features_.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -214425,7 +213597,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -214441,7 +213613,7 @@ "qname": "sklearn.preprocessing._polynomial.PolynomialFeatures.powers_.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -214450,7 +213622,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Exponent for each of the inputs in the output.", "docstring": "Exponent for each of the inputs in the output." }, @@ -214466,7 +213638,7 @@ "qname": "sklearn.preprocessing._polynomial.PolynomialFeatures.transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -214479,7 +213651,7 @@ "qname": "sklearn.preprocessing._polynomial.PolynomialFeatures.transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The data to transform, row by row.\n\nPrefer CSR over CSC for sparse input (for speed), but CSC is\nrequired if the degree is 4 or higher. 
If the degree is less than\n4 and the input format is CSC, it will be converted to CSR, have\nits polynomial features generated, then converted back to CSC.\n\nIf the degree is 2 or 3, the method described in \"Leveraging\nSparsity to Speed Up Polynomial Feature Expansions of CSR Matrices\nUsing K-Simplex Numbers\" by Andrew Nystrom and John Hughes is\nused, which is much faster than the method used on CSC input. For\nthis reason, a CSC input will be converted to CSR, and the output\nwill be converted back to CSC prior to being returned, hence the\npreference of CSR." @@ -214500,7 +213672,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Transform data to polynomial features.", "docstring": "Transform data to polynomial features.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The data to transform, row by row.\n\n Prefer CSR over CSC for sparse input (for speed), but CSC is\n required if the degree is 4 or higher. If the degree is less than\n 4 and the input format is CSC, it will be converted to CSR, have\n its polynomial features generated, then converted back to CSC.\n\n If the degree is 2 or 3, the method described in \"Leveraging\n Sparsity to Speed Up Polynomial Feature Expansions of CSR Matrices\n Using K-Simplex Numbers\" by Andrew Nystrom and John Hughes is\n used, which is much faster than the method used on CSC input. For\n this reason, a CSC input will be converted to CSR, and the output\n will be converted back to CSC prior to being returned, hence the\n preference of CSR.\n\n Returns\n -------\n XP : {ndarray, sparse matrix} of shape (n_samples, NP)\n The matrix of features, where `NP` is the number of polynomial\n features generated from the combination of inputs. If a sparse\n matrix is provided, it will be converted into a sparse\n `csr_matrix`.\n " }, @@ -214516,7 +213688,7 @@ "qname": "sklearn.preprocessing._polynomial.SplineTransformer.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -214529,7 +213701,7 @@ "qname": "sklearn.preprocessing._polynomial.SplineTransformer.__init__.n_knots", "default_value": "5", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=5", "description": "Number of knots of the splines if `knots` equals one of\n{'uniform', 'quantile'}. Must be larger or equal 2. Ignored if `knots`\nis array-like." @@ -214545,7 +213717,7 @@ "qname": "sklearn.preprocessing._polynomial.SplineTransformer.__init__.degree", "default_value": "3", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=3", "description": "The polynomial degree of the spline basis. Must be a non-negative\ninteger." 
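For reference, a minimal sketch of the PolynomialFeatures parameters (degree, interaction_only, include_bias) documented in the hunks above (illustrative only, not part of the annotation data):

    # PolynomialFeatures: degree-2 expansion of a small dense input.
    import numpy as np
    from sklearn.preprocessing import PolynomialFeatures

    X = np.arange(6).reshape(3, 2)
    poly = PolynomialFeatures(degree=2, interaction_only=False, include_bias=True)
    XP = poly.fit_transform(X)       # columns: 1, x0, x1, x0^2, x0*x1, x1^2
    print(XP.shape)                  # (3, 6)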
@@ -214561,7 +213733,7 @@ "qname": "sklearn.preprocessing._polynomial.SplineTransformer.__init__.knots", "default_value": "'uniform'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'uniform', 'quantile'} or array-like of shape (n_knots, n_features), default='uniform'", "description": "Set knot positions such that first knot <= features <= last knot.\n\n- If 'uniform', `n_knots` number of knots are distributed uniformly\n from min to max values of the features.\n- If 'quantile', they are distributed uniformly along the quantiles of\n the features.\n- If an array-like is given, it directly specifies the sorted knot\n positions including the boundary knots. Note that, internally,\n `degree` number of knots are added before the first knot, the same\n after the last knot." @@ -214586,14 +213758,14 @@ "qname": "sklearn.preprocessing._polynomial.SplineTransformer.__init__.extrapolation", "default_value": "'constant'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'error', 'constant', 'linear', 'continue', 'periodic'}, default='constant'", "description": "If 'error', values outside the min and max values of the training\nfeatures raises a `ValueError`. If 'constant', the value of the\nsplines at minimum and maximum value of the features is used as\nconstant extrapolation. If 'linear', a linear extrapolation is used.\nIf 'continue', the splines are extrapolated as is, i.e. option\n`extrapolate=True` in :class:`scipy.interpolate.BSpline`. If\n'periodic', periodic splines with a periodicity equal to the distance\nbetween the first and last knot are used. Periodic splines enforce\nequal function values and derivatives at the first and last knot.\nFor example, this makes it possible to avoid introducing an arbitrary\njump between Dec 31st and Jan 1st in spline features derived from a\nnaturally periodic \"day-of-year\" input feature. In this case it is\nrecommended to manually set the knot values to control the period." }, "type": { "kind": "EnumType", - "values": ["continue", "constant", "linear", "error", "periodic"] + "values": ["constant", "continue", "periodic", "linear", "error"] } }, { @@ -214602,7 +213774,7 @@ "qname": "sklearn.preprocessing._polynomial.SplineTransformer.__init__.include_bias", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "If True (default), then the last spline element inside the data range\nof a feature is dropped. As B-splines sum to one over the spline basis\nfunctions for each data point, they implicitly include a bias term,\ni.e. a column of ones. It acts as an intercept term in a linear models." @@ -214618,19 +213790,19 @@ "qname": "sklearn.preprocessing._polynomial.SplineTransformer.__init__.order", "default_value": "'C'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'C', 'F'}, default='C'", "description": "Order of output array. 'F' order is faster to compute, but may slow\ndown subsequent estimators." 
}, "type": { "kind": "EnumType", - "values": ["C", "F"] + "values": ["F", "C"] } } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -214710,7 +213882,7 @@ "qname": "sklearn.preprocessing._polynomial.SplineTransformer.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -214723,7 +213895,7 @@ "qname": "sklearn.preprocessing._polynomial.SplineTransformer.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "The data." @@ -214739,7 +213911,7 @@ "qname": "sklearn.preprocessing._polynomial.SplineTransformer.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "None", "description": "Ignored." @@ -214755,7 +213927,7 @@ "qname": "sklearn.preprocessing._polynomial.SplineTransformer.fit.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,), default = None", "description": "Individual weights for each sample. Used to calculate quantiles if\n`knots=\"quantile\"`. For `knots=\"uniform\"`, zero weighted\nobservations are ignored for finding the min and max of `X`." @@ -214776,7 +213948,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Compute knot positions of splines.", "docstring": "Compute knot positions of splines.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The data.\n\n y : None\n Ignored.\n\n sample_weight : array-like of shape (n_samples,), default = None\n Individual weights for each sample. Used to calculate quantiles if\n `knots=\"quantile\"`. For `knots=\"uniform\"`, zero weighted\n observations are ignored for finding the min and max of `X`.\n\n Returns\n -------\n self : object\n Fitted transformer.\n " }, @@ -214794,7 +213966,7 @@ "qname": "sklearn.preprocessing._polynomial.SplineTransformer.get_feature_names.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -214807,7 +213979,7 @@ "qname": "sklearn.preprocessing._polynomial.SplineTransformer.get_feature_names.input_features", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "list of str of shape (n_features,), default=None", "description": "String names for input features if available. By default,\n\"x0\", \"x1\", ... \"xn_features\" is used." @@ -214819,7 +213991,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Return feature names for output features.", "docstring": "Return feature names for output features.\n\n Parameters\n ----------\n input_features : list of str of shape (n_features,), default=None\n String names for input features if available. By default,\n \"x0\", \"x1\", ... 
\"xn_features\" is used.\n\n Returns\n -------\n output_feature_names : list of str of shape (n_output_features,)\n Transformed feature names.\n " }, @@ -214835,7 +214007,7 @@ "qname": "sklearn.preprocessing._polynomial.SplineTransformer.get_feature_names_out.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -214848,7 +214020,7 @@ "qname": "sklearn.preprocessing._polynomial.SplineTransformer.get_feature_names_out.input_features", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of str or None, default=None", "description": "Input features.\n\n- If `input_features` is `None`, then `feature_names_in_` is\n used as feature names in. If `feature_names_in_` is not defined,\n then the following input feature names are generated:\n `[\"x0\", \"x1\", ..., \"x(n_features_in_ - 1)\"]`.\n- If `input_features` is an array-like, then `input_features` must\n match `feature_names_in_` if `feature_names_in_` is defined." @@ -214869,7 +214041,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Get output feature names for transformation.", "docstring": "Get output feature names for transformation.\n\n Parameters\n ----------\n input_features : array-like of str or None, default=None\n Input features.\n\n - If `input_features` is `None`, then `feature_names_in_` is\n used as feature names in. If `feature_names_in_` is not defined,\n then the following input feature names are generated:\n `[\"x0\", \"x1\", ..., \"x(n_features_in_ - 1)\"]`.\n - If `input_features` is an array-like, then `input_features` must\n match `feature_names_in_` if `feature_names_in_` is defined.\n\n Returns\n -------\n feature_names_out : ndarray of str objects\n Transformed feature names.\n " }, @@ -214885,7 +214057,7 @@ "qname": "sklearn.preprocessing._polynomial.SplineTransformer.transform.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -214898,7 +214070,7 @@ "qname": "sklearn.preprocessing._polynomial.SplineTransformer.transform.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "The data to transform." 
@@ -214910,7 +214082,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Transform each feature data to B-splines.", "docstring": "Transform each feature data to B-splines.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The data to transform.\n\n Returns\n -------\n XBS : ndarray of shape (n_samples, n_features * n_splines)\n The matrix of features, where n_splines is the number of bases\n elements of the B-splines, n_knots + degree - 1.\n " }, @@ -216110,7 +215282,7 @@ "types": [ { "kind": "EnumType", - "values": ["knn", "rbf"] + "values": ["rbf", "knn"] }, { "kind": "NamedType", @@ -216433,7 +215605,7 @@ "qname": "sklearn.semi_supervised._label_propagation.LabelPropagation.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -216446,7 +215618,7 @@ "qname": "sklearn.semi_supervised._label_propagation.LabelPropagation.__init__.kernel", "default_value": "'rbf'", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{'knn', 'rbf'} or callable, default='rbf'", "description": "String identifier for kernel function to use or the kernel function\nitself. Only 'rbf' and 'knn' strings are valid inputs. The function\npassed should take two inputs, each of shape (n_samples, n_features),\nand return a (n_samples, n_samples) shaped weight matrix." @@ -216456,7 +215628,7 @@ "types": [ { "kind": "EnumType", - "values": ["knn", "rbf"] + "values": ["rbf", "knn"] }, { "kind": "NamedType", @@ -216471,7 +215643,7 @@ "qname": "sklearn.semi_supervised._label_propagation.LabelPropagation.__init__.gamma", "default_value": "20", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=20", "description": "Parameter for rbf kernel." @@ -216487,7 +215659,7 @@ "qname": "sklearn.semi_supervised._label_propagation.LabelPropagation.__init__.n_neighbors", "default_value": "7", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=7", "description": "Parameter for knn kernel which need to be strictly positive." @@ -216503,7 +215675,7 @@ "qname": "sklearn.semi_supervised._label_propagation.LabelPropagation.__init__.max_iter", "default_value": "1000", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=1000", "description": "Change maximum number of iterations allowed." @@ -216519,7 +215691,7 @@ "qname": "sklearn.semi_supervised._label_propagation.LabelPropagation.__init__.tol", "default_value": "0.001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, 1e-3", "description": "Convergence tolerance: threshold to consider the system at steady\nstate." @@ -216544,7 +215716,7 @@ "qname": "sklearn.semi_supervised._label_propagation.LabelPropagation.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "The number of parallel jobs to run.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details." 
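For reference, a minimal sketch of the SplineTransformer fit/transform behaviour described above; the output has n_knots + degree - 1 spline columns per feature with the defaults shown in these annotations (illustrative only, not part of the annotation data):

    # SplineTransformer: B-spline basis expansion of a single feature.
    import numpy as np
    from sklearn.preprocessing import SplineTransformer

    X = np.linspace(0, 1, 20).reshape(-1, 1)
    spline = SplineTransformer(n_knots=5, degree=3, extrapolation="constant")
    XBS = spline.fit_transform(X)    # n_splines = n_knots + degree - 1 = 7 columns
    print(XBS.shape)                 # (20, 7)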
@@ -216556,7 +215728,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -216597,7 +215769,7 @@ "qname": "sklearn.semi_supervised._label_propagation.LabelPropagation.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -216610,7 +215782,7 @@ "qname": "sklearn.semi_supervised._label_propagation.LabelPropagation.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "Training data, where `n_samples` is the number of samples\nand `n_features` is the number of features." @@ -216626,7 +215798,7 @@ "qname": "sklearn.semi_supervised._label_propagation.LabelPropagation.fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "Target class values with unlabeled points marked as -1.\nAll unlabeled samples will be transductively assigned labels\ninternally." @@ -216638,7 +215810,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit a semi-supervised label propagation model to X.", "docstring": "Fit a semi-supervised label propagation model to X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n y : array-like of shape (n_samples,)\n Target class values with unlabeled points marked as -1.\n All unlabeled samples will be transductively assigned labels\n internally.\n\n Returns\n -------\n self : object\n Returns the instance itself.\n " }, @@ -216654,7 +215826,7 @@ "qname": "sklearn.semi_supervised._label_propagation.LabelSpreading.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -216667,7 +215839,7 @@ "qname": "sklearn.semi_supervised._label_propagation.LabelSpreading.__init__.kernel", "default_value": "'rbf'", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{'knn', 'rbf'} or callable, default='rbf'", "description": "String identifier for kernel function to use or the kernel function\nitself. Only 'rbf' and 'knn' strings are valid inputs. The function\npassed should take two inputs, each of shape (n_samples, n_features),\nand return a (n_samples, n_samples) shaped weight matrix." @@ -216677,7 +215849,7 @@ "types": [ { "kind": "EnumType", - "values": ["knn", "rbf"] + "values": ["rbf", "knn"] }, { "kind": "NamedType", @@ -216692,7 +215864,7 @@ "qname": "sklearn.semi_supervised._label_propagation.LabelSpreading.__init__.gamma", "default_value": "20", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=20", "description": "Parameter for rbf kernel." @@ -216708,7 +215880,7 @@ "qname": "sklearn.semi_supervised._label_propagation.LabelSpreading.__init__.n_neighbors", "default_value": "7", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=7", "description": "Parameter for knn kernel which is a strictly positive integer." 
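For reference, a minimal sketch of the LabelPropagation fit contract documented above, where unlabeled points are marked with -1 and receive transductive labels (illustrative only; the 30% masking rate is an arbitrary choice for the example):

    # LabelPropagation: semi-supervised fit with partially hidden labels.
    import numpy as np
    from sklearn.datasets import load_iris
    from sklearn.semi_supervised import LabelPropagation

    X, y = load_iris(return_X_y=True)
    rng = np.random.RandomState(42)
    y_partial = np.copy(y)
    y_partial[rng.rand(len(y)) < 0.3] = -1       # hide ~30% of the labels
    model = LabelPropagation(kernel="rbf", gamma=20, max_iter=1000)
    model.fit(X, y_partial)
    print(model.transduction_[:10])              # labels assigned to all samples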
@@ -216724,7 +215896,7 @@ "qname": "sklearn.semi_supervised._label_propagation.LabelSpreading.__init__.alpha", "default_value": "0.2", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.2", "description": "Clamping factor. A value in (0, 1) that specifies the relative amount\nthat an instance should adopt the information from its neighbors as\nopposed to its initial label.\nalpha=0 means keeping the initial label information; alpha=1 means\nreplacing all initial information." @@ -216740,7 +215912,7 @@ "qname": "sklearn.semi_supervised._label_propagation.LabelSpreading.__init__.max_iter", "default_value": "30", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=30", "description": "Maximum number of iterations allowed." @@ -216756,7 +215928,7 @@ "qname": "sklearn.semi_supervised._label_propagation.LabelSpreading.__init__.tol", "default_value": "0.001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-3", "description": "Convergence tolerance: threshold to consider the system at steady\nstate." @@ -216772,7 +215944,7 @@ "qname": "sklearn.semi_supervised._label_propagation.LabelSpreading.__init__.n_jobs", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "The number of parallel jobs to run.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details." @@ -216784,7 +215956,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -216825,7 +215997,7 @@ "qname": "sklearn.semi_supervised._self_training.SelfTrainingClassifier.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -216838,7 +216010,7 @@ "qname": "sklearn.semi_supervised._self_training.SelfTrainingClassifier.__init__.base_estimator", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "estimator object", "description": "An estimator object implementing `fit` and `predict_proba`.\nInvoking the `fit` method will fit a clone of the passed estimator,\nwhich will be stored in the `base_estimator_` attribute." @@ -216854,7 +216026,7 @@ "qname": "sklearn.semi_supervised._self_training.SelfTrainingClassifier.__init__.threshold", "default_value": "0.75", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.75", "description": "The decision threshold for use with `criterion='threshold'`.\nShould be in [0, 1). When using the `'threshold'` criterion, a\n:ref:`well calibrated classifier ` should be used." @@ -216870,14 +216042,14 @@ "qname": "sklearn.semi_supervised._self_training.SelfTrainingClassifier.__init__.criterion", "default_value": "'threshold'", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{'threshold', 'k_best'}, default='threshold'", "description": "The selection criterion used to select which labels to add to the\ntraining set. If `'threshold'`, pseudo-labels with prediction\nprobabilities above `threshold` are added to the dataset. 
If `'k_best'`,\nthe `k_best` pseudo-labels with highest prediction probabilities are\nadded to the dataset. When using the 'threshold' criterion, a\n:ref:`well calibrated classifier ` should be used." }, "type": { "kind": "EnumType", - "values": ["threshold", "k_best"] + "values": ["k_best", "threshold"] } }, { @@ -216886,7 +216058,7 @@ "qname": "sklearn.semi_supervised._self_training.SelfTrainingClassifier.__init__.k_best", "default_value": "10", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=10", "description": "The amount of samples to add in each iteration. Only used when\n`criterion='k_best'`." @@ -216902,7 +216074,7 @@ "qname": "sklearn.semi_supervised._self_training.SelfTrainingClassifier.__init__.max_iter", "default_value": "10", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "int or None, default=10", "description": "Maximum number of iterations allowed. Should be greater than or equal\nto 0. If it is `None`, the classifier will continue to predict labels\nuntil no new pseudo-labels are added, or all unlabeled samples have\nbeen labeled." @@ -216927,7 +216099,7 @@ "qname": "sklearn.semi_supervised._self_training.SelfTrainingClassifier.__init__.verbose", "default_value": "False", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Enable verbose output." @@ -216939,7 +216111,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -216955,7 +216127,7 @@ "qname": "sklearn.semi_supervised._self_training.SelfTrainingClassifier.decision_function.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -216968,7 +216140,7 @@ "qname": "sklearn.semi_supervised._self_training.SelfTrainingClassifier.decision_function.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Array representing the data." @@ -216989,7 +216161,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Call decision function of the `base_estimator`.", "docstring": "Call decision function of the `base_estimator`.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Array representing the data.\n\n Returns\n -------\n y : ndarray of shape (n_samples, n_features)\n Result of the decision function of the `base_estimator`.\n " }, @@ -217005,7 +216177,7 @@ "qname": "sklearn.semi_supervised._self_training.SelfTrainingClassifier.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -217018,7 +216190,7 @@ "qname": "sklearn.semi_supervised._self_training.SelfTrainingClassifier.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Array representing the data." 
@@ -217043,7 +216215,7 @@ "qname": "sklearn.semi_supervised._self_training.SelfTrainingClassifier.fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples,)", "description": "Array representing the labels. Unlabeled samples should have the\nlabel -1." @@ -217064,7 +216236,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit self-training classifier using `X`, `y` as training data.", "docstring": "\n Fit self-training classifier using `X`, `y` as training data.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Array representing the data.\n\n y : {array-like, sparse matrix} of shape (n_samples,)\n Array representing the labels. Unlabeled samples should have the\n label -1.\n\n Returns\n -------\n self : object\n Fitted estimator.\n " }, @@ -217080,7 +216252,7 @@ "qname": "sklearn.semi_supervised._self_training.SelfTrainingClassifier.predict.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -217093,7 +216265,7 @@ "qname": "sklearn.semi_supervised._self_training.SelfTrainingClassifier.predict.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Array representing the data." @@ -217114,7 +216286,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict the classes of `X`.", "docstring": "Predict the classes of `X`.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Array representing the data.\n\n Returns\n -------\n y : ndarray of shape (n_samples,)\n Array with predicted labels.\n " }, @@ -217130,7 +216302,7 @@ "qname": "sklearn.semi_supervised._self_training.SelfTrainingClassifier.predict_log_proba.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -217143,7 +216315,7 @@ "qname": "sklearn.semi_supervised._self_training.SelfTrainingClassifier.predict_log_proba.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Array representing the data." 
@@ -217164,7 +216336,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict log probability for each possible outcome.", "docstring": "Predict log probability for each possible outcome.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Array representing the data.\n\n Returns\n -------\n y : ndarray of shape (n_samples, n_features)\n Array with log prediction probabilities.\n " }, @@ -217180,7 +216352,7 @@ "qname": "sklearn.semi_supervised._self_training.SelfTrainingClassifier.predict_proba.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -217193,7 +216365,7 @@ "qname": "sklearn.semi_supervised._self_training.SelfTrainingClassifier.predict_proba.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Array representing the data." @@ -217214,7 +216386,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict probability for each possible outcome.", "docstring": "Predict probability for each possible outcome.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Array representing the data.\n\n Returns\n -------\n y : ndarray of shape (n_samples, n_features)\n Array with prediction probabilities.\n " }, @@ -217230,7 +216402,7 @@ "qname": "sklearn.semi_supervised._self_training.SelfTrainingClassifier.score.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -217243,7 +216415,7 @@ "qname": "sklearn.semi_supervised._self_training.SelfTrainingClassifier.score.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Array representing the data." @@ -217268,7 +216440,7 @@ "qname": "sklearn.semi_supervised._self_training.SelfTrainingClassifier.score.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "Array representing the labels." 
@@ -217280,7 +216452,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Call score on the `base_estimator`.", "docstring": "Call score on the `base_estimator`.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Array representing the data.\n\n y : array-like of shape (n_samples,)\n Array representing the labels.\n\n Returns\n -------\n score : float\n Result of calling score on the `base_estimator`.\n " }, @@ -218595,31 +217767,6 @@ "description": "", "docstring": null }, - { - "id": "sklearn/sklearn.svm._base/BaseSVC/_class_weight@getter", - "name": "_class_weight", - "qname": "sklearn.svm._base.BaseSVC._class_weight", - "decorators": ["property"], - "parameters": [ - { - "id": "sklearn/sklearn.svm._base/BaseSVC/_class_weight/self", - "name": "self", - "qname": "sklearn.svm._base.BaseSVC._class_weight.self", - "default_value": null, - "assigned_by": "IMPLICIT", - "is_public": false, - "docstring": { - "type": "", - "description": "" - }, - "type": {} - } - ], - "results": [], - "is_public": false, - "description": "Weights per class", - "docstring": "Weights per class" - }, { "id": "sklearn/sklearn.svm._base/BaseSVC/_dense_predict_proba", "name": "_dense_predict_proba", @@ -219224,7 +218371,7 @@ }, "type": { "kind": "EnumType", - "values": ["ovr", "crammer_singer"] + "values": ["crammer_singer", "ovr"] } }, { @@ -219240,7 +218387,7 @@ }, "type": { "kind": "EnumType", - "values": ["hinge", "logistic_regression", "epsilon_insensitive", "squared_hinge"] + "values": ["logistic_regression", "epsilon_insensitive", "squared_hinge", "hinge"] } }, { @@ -219456,7 +218603,7 @@ }, "type": { "kind": "EnumType", - "values": ["log", "squared_hinge"] + "values": ["squared_hinge", "log"] } }, { @@ -219509,7 +218656,7 @@ "qname": "sklearn.svm._classes.LinearSVC.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -219522,7 +218669,7 @@ "qname": "sklearn.svm._classes.LinearSVC.__init__.penalty", "default_value": "'l2'", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{'l1', 'l2'}, default='l2'", "description": "Specifies the norm used in the penalization. The 'l2'\npenalty is the standard used in SVC. The 'l1' leads to ``coef_``\nvectors that are sparse." @@ -219538,14 +218685,14 @@ "qname": "sklearn.svm._classes.LinearSVC.__init__.loss", "default_value": "'squared_hinge'", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{'hinge', 'squared_hinge'}, default='squared_hinge'", "description": "Specifies the loss function. 'hinge' is the standard SVM loss\n(used e.g. by the SVC class) while 'squared_hinge' is the\nsquare of the hinge loss. The combination of ``penalty='l1'``\nand ``loss='hinge'`` is not supported." }, "type": { "kind": "EnumType", - "values": ["hinge", "squared_hinge"] + "values": ["squared_hinge", "hinge"] } }, { @@ -219554,7 +218701,7 @@ "qname": "sklearn.svm._classes.LinearSVC.__init__.dual", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Select the algorithm to either solve the dual or primal\noptimization problem. Prefer dual=False when n_samples > n_features." 
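For reference, a minimal sketch of the SelfTrainingClassifier usage implied by the entries above: it wraps a base estimator exposing predict_proba and pseudo-labels samples whose predicted probability exceeds the threshold (illustrative only; the masking rate and SVC settings are arbitrary example choices):

    # SelfTrainingClassifier: threshold-based self-training around SVC.
    import numpy as np
    from sklearn.datasets import load_iris
    from sklearn.semi_supervised import SelfTrainingClassifier
    from sklearn.svm import SVC

    X, y = load_iris(return_X_y=True)
    rng = np.random.RandomState(42)
    y_partial = np.copy(y)
    y_partial[rng.rand(len(y)) < 0.3] = -1                     # unlabeled samples get -1
    base = SVC(probability=True, gamma="auto")                 # must provide predict_proba
    self_training = SelfTrainingClassifier(base, criterion="threshold", threshold=0.75)
    self_training.fit(X, y_partial)
    print(self_training.predict(X[:5]))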
@@ -219570,7 +218717,7 @@ "qname": "sklearn.svm._classes.LinearSVC.__init__.tol", "default_value": "0.0001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-4", "description": "Tolerance for stopping criteria." @@ -219586,7 +218733,7 @@ "qname": "sklearn.svm._classes.LinearSVC.__init__.C", "default_value": "1.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1.0", "description": "Regularization parameter. The strength of the regularization is\ninversely proportional to C. Must be strictly positive." @@ -219602,14 +218749,14 @@ "qname": "sklearn.svm._classes.LinearSVC.__init__.multi_class", "default_value": "'ovr'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'ovr', 'crammer_singer'}, default='ovr'", "description": "Determines the multi-class strategy if `y` contains more than\ntwo classes.\n``\"ovr\"`` trains n_classes one-vs-rest classifiers, while\n``\"crammer_singer\"`` optimizes a joint objective over all classes.\nWhile `crammer_singer` is interesting from a theoretical perspective\nas it is consistent, it is seldom used in practice as it rarely leads\nto better accuracy and is more expensive to compute.\nIf ``\"crammer_singer\"`` is chosen, the options loss, penalty and dual\nwill be ignored." }, "type": { "kind": "EnumType", - "values": ["ovr", "crammer_singer"] + "values": ["crammer_singer", "ovr"] } }, { @@ -219618,7 +218765,7 @@ "qname": "sklearn.svm._classes.LinearSVC.__init__.fit_intercept", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether to calculate the intercept for this model. If set\nto false, no intercept will be used in calculations\n(i.e. data is expected to be already centered)." @@ -219634,7 +218781,7 @@ "qname": "sklearn.svm._classes.LinearSVC.__init__.intercept_scaling", "default_value": "1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1", "description": "When self.fit_intercept is True, instance vector x becomes\n``[x, self.intercept_scaling]``,\ni.e. a \"synthetic\" feature with constant value equals to\nintercept_scaling is appended to the instance vector.\nThe intercept becomes intercept_scaling * synthetic feature weight\nNote! the synthetic feature weight is subject to l1/l2 regularization\nas all other features.\nTo lessen the effect of regularization on synthetic feature weight\n(and therefore on the intercept) intercept_scaling has to be increased." @@ -219650,7 +218797,7 @@ "qname": "sklearn.svm._classes.LinearSVC.__init__.class_weight", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "dict or 'balanced', default=None", "description": "Set the parameter C of class i to ``class_weight[i]*C`` for\nSVC. If not given, all classes are supposed to have\nweight one.\nThe \"balanced\" mode uses the values of y to automatically adjust\nweights inversely proportional to class frequencies in the input data\nas ``n_samples / (n_classes * np.bincount(y))``." @@ -219675,7 +218822,7 @@ "qname": "sklearn.svm._classes.LinearSVC.__init__.verbose", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=0", "description": "Enable verbose output. 
Note that this setting takes advantage of a\nper-process runtime setting in liblinear that, if enabled, may not work\nproperly in a multithreaded context." @@ -219691,7 +218838,7 @@ "qname": "sklearn.svm._classes.LinearSVC.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Controls the pseudo random number generation for shuffling the data for\nthe dual coordinate descent (if ``dual=True``). When ``dual=False`` the\nunderlying implementation of :class:`LinearSVC` is not random and\n``random_state`` has no effect on the results.\nPass an int for reproducible output across multiple function calls.\nSee :term:`Glossary `." @@ -219720,7 +218867,7 @@ "qname": "sklearn.svm._classes.LinearSVC.__init__.max_iter", "default_value": "1000", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=1000", "description": "The maximum number of iterations to be run." @@ -219732,7 +218879,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -219773,7 +218920,7 @@ "qname": "sklearn.svm._classes.LinearSVC.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -219786,7 +218933,7 @@ "qname": "sklearn.svm._classes.LinearSVC.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Training vector, where `n_samples` is the number of samples and\n`n_features` is the number of features." @@ -219811,7 +218958,7 @@ "qname": "sklearn.svm._classes.LinearSVC.fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "Target vector relative to X." @@ -219827,7 +218974,7 @@ "qname": "sklearn.svm._classes.LinearSVC.fit.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,), default=None", "description": "Array of weights that are assigned to individual\nsamples. If not provided,\nthen each sample is given unit weight.\n\n.. versionadded:: 0.18" @@ -219839,7 +218986,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the model according to the given training data.", "docstring": "Fit the model according to the given training data.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training vector, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n y : array-like of shape (n_samples,)\n Target vector relative to X.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Array of weights that are assigned to individual\n samples. If not provided,\n then each sample is given unit weight.\n\n .. 
versionadded:: 0.18\n\n Returns\n -------\n self : object\n An instance of the estimator.\n " }, @@ -219855,7 +219002,7 @@ "qname": "sklearn.svm._classes.LinearSVR.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -219868,7 +219015,7 @@ "qname": "sklearn.svm._classes.LinearSVR.__init__.epsilon", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.0", "description": "Epsilon parameter in the epsilon-insensitive loss function. Note\nthat the value of this parameter depends on the scale of the target\nvariable y. If unsure, set ``epsilon=0``." @@ -219884,7 +219031,7 @@ "qname": "sklearn.svm._classes.LinearSVR.__init__.tol", "default_value": "0.0001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-4", "description": "Tolerance for stopping criteria." @@ -219900,7 +219047,7 @@ "qname": "sklearn.svm._classes.LinearSVR.__init__.C", "default_value": "1.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1.0", "description": "Regularization parameter. The strength of the regularization is\ninversely proportional to C. Must be strictly positive." @@ -219916,14 +219063,14 @@ "qname": "sklearn.svm._classes.LinearSVR.__init__.loss", "default_value": "'epsilon_insensitive'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'epsilon_insensitive', 'squared_epsilon_insensitive'}, default='epsilon_insensitive'", "description": "Specifies the loss function. The epsilon-insensitive loss\n(standard SVR) is the L1 loss, while the squared epsilon-insensitive\nloss ('squared_epsilon_insensitive') is the L2 loss." }, "type": { "kind": "EnumType", - "values": ["squared_epsilon_insensitive", "epsilon_insensitive"] + "values": ["epsilon_insensitive", "squared_epsilon_insensitive"] } }, { @@ -219932,7 +219079,7 @@ "qname": "sklearn.svm._classes.LinearSVR.__init__.fit_intercept", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether to calculate the intercept for this model. If set\nto false, no intercept will be used in calculations\n(i.e. data is expected to be already centered)." @@ -219948,7 +219095,7 @@ "qname": "sklearn.svm._classes.LinearSVR.__init__.intercept_scaling", "default_value": "1.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1.0", "description": "When self.fit_intercept is True, instance vector x becomes\n[x, self.intercept_scaling],\ni.e. a \"synthetic\" feature with constant value equals to\nintercept_scaling is appended to the instance vector.\nThe intercept becomes intercept_scaling * synthetic feature weight\nNote! the synthetic feature weight is subject to l1/l2 regularization\nas all other features.\nTo lessen the effect of regularization on synthetic feature weight\n(and therefore on the intercept) intercept_scaling has to be increased." @@ -219964,7 +219111,7 @@ "qname": "sklearn.svm._classes.LinearSVR.__init__.dual", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Select the algorithm to either solve the dual or primal\noptimization problem. 
Prefer dual=False when n_samples > n_features." @@ -219980,7 +219127,7 @@ "qname": "sklearn.svm._classes.LinearSVR.__init__.verbose", "default_value": "0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=0", "description": "Enable verbose output. Note that this setting takes advantage of a\nper-process runtime setting in liblinear that, if enabled, may not work\nproperly in a multithreaded context." @@ -219996,7 +219143,7 @@ "qname": "sklearn.svm._classes.LinearSVR.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Controls the pseudo random number generation for shuffling the data.\nPass an int for reproducible output across multiple function calls.\nSee :term:`Glossary `." @@ -220025,7 +219172,7 @@ "qname": "sklearn.svm._classes.LinearSVR.__init__.max_iter", "default_value": "1000", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=1000", "description": "The maximum number of iterations to be run." @@ -220037,7 +219184,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -220078,7 +219225,7 @@ "qname": "sklearn.svm._classes.LinearSVR.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -220091,7 +219238,7 @@ "qname": "sklearn.svm._classes.LinearSVR.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Training vector, where `n_samples` is the number of samples and\n`n_features` is the number of features." @@ -220116,7 +219263,7 @@ "qname": "sklearn.svm._classes.LinearSVR.fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,)", "description": "Target vector relative to X." @@ -220132,7 +219279,7 @@ "qname": "sklearn.svm._classes.LinearSVR.fit.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,), default=None", "description": "Array of weights that are assigned to individual\nsamples. If not provided,\nthen each sample is given unit weight.\n\n.. versionadded:: 0.18" @@ -220144,7 +219291,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Fit the model according to the given training data.", "docstring": "Fit the model according to the given training data.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training vector, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n y : array-like of shape (n_samples,)\n Target vector relative to X.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Array of weights that are assigned to individual\n samples. If not provided,\n then each sample is given unit weight.\n\n .. 
versionadded:: 0.18\n\n Returns\n -------\n self : object\n An instance of the estimator.\n " }, @@ -220160,7 +219307,7 @@ "qname": "sklearn.svm._classes.NuSVC.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -220173,7 +219320,7 @@ "qname": "sklearn.svm._classes.NuSVC.__init__.nu", "default_value": "0.5", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.5", "description": "An upper bound on the fraction of margin errors (see :ref:`User Guide\n`) and a lower bound of the fraction of support vectors.\nShould be in the interval (0, 1]." @@ -220202,7 +219349,7 @@ "qname": "sklearn.svm._classes.NuSVC.__init__.kernel", "default_value": "'rbf'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'linear', 'poly', 'rbf', 'sigmoid', 'precomputed'} or callable, default='rbf'", "description": "Specifies the kernel type to be used in the algorithm.\nIf none is given, 'rbf' will be used. If a callable is given it is\nused to precompute the kernel matrix." @@ -220212,7 +219359,7 @@ "types": [ { "kind": "EnumType", - "values": ["sigmoid", "precomputed", "linear", "rbf", "poly"] + "values": ["rbf", "precomputed", "poly", "sigmoid", "linear"] }, { "kind": "NamedType", @@ -220227,7 +219374,7 @@ "qname": "sklearn.svm._classes.NuSVC.__init__.degree", "default_value": "3", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=3", "description": "Degree of the polynomial kernel function ('poly').\nIgnored by all other kernels." @@ -220243,7 +219390,7 @@ "qname": "sklearn.svm._classes.NuSVC.__init__.gamma", "default_value": "'scale'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'scale', 'auto'} or float, default='scale'", "description": "Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.\n\n- if ``gamma='scale'`` (default) is passed then it uses\n 1 / (n_features * X.var()) as value of gamma,\n- if 'auto', uses 1 / n_features.\n\n.. versionchanged:: 0.22\n The default value of ``gamma`` changed from 'auto' to 'scale'." @@ -220253,7 +219400,7 @@ "types": [ { "kind": "EnumType", - "values": ["scale", "auto"] + "values": ["auto", "scale"] }, { "kind": "NamedType", @@ -220268,7 +219415,7 @@ "qname": "sklearn.svm._classes.NuSVC.__init__.coef0", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.0", "description": "Independent term in kernel function.\nIt is only significant in 'poly' and 'sigmoid'." @@ -220284,7 +219431,7 @@ "qname": "sklearn.svm._classes.NuSVC.__init__.shrinking", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether to use the shrinking heuristic.\nSee the :ref:`User Guide `." @@ -220300,7 +219447,7 @@ "qname": "sklearn.svm._classes.NuSVC.__init__.probability", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Whether to enable probability estimates. This must be enabled prior\nto calling `fit`, will slow down that method as it internally uses\n5-fold cross-validation, and `predict_proba` may be inconsistent with\n`predict`. Read more in the :ref:`User Guide `." 
@@ -220316,7 +219463,7 @@ "qname": "sklearn.svm._classes.NuSVC.__init__.tol", "default_value": "0.001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-3", "description": "Tolerance for stopping criterion." @@ -220332,7 +219479,7 @@ "qname": "sklearn.svm._classes.NuSVC.__init__.cache_size", "default_value": "200", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=200", "description": "Specify the size of the kernel cache (in MB)." @@ -220348,7 +219495,7 @@ "qname": "sklearn.svm._classes.NuSVC.__init__.class_weight", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{dict, 'balanced'}, default=None", "description": "Set the parameter C of class i to class_weight[i]*C for\nSVC. If not given, all classes are supposed to have\nweight one. The \"balanced\" mode uses the values of y to automatically\nadjust weights inversely proportional to class frequencies as\n``n_samples / (n_classes * np.bincount(y))``." @@ -220364,7 +219511,7 @@ "qname": "sklearn.svm._classes.NuSVC.__init__.verbose", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Enable verbose output. Note that this setting takes advantage of a\nper-process runtime setting in libsvm that, if enabled, may not work\nproperly in a multithreaded context." @@ -220380,7 +219527,7 @@ "qname": "sklearn.svm._classes.NuSVC.__init__.max_iter", "default_value": "-1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=-1", "description": "Hard limit on iterations within solver, or -1 for no limit." @@ -220396,7 +219543,7 @@ "qname": "sklearn.svm._classes.NuSVC.__init__.decision_function_shape", "default_value": "'ovr'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'ovo', 'ovr'}, default='ovr'", "description": "Whether to return a one-vs-rest ('ovr') decision function of shape\n(n_samples, n_classes) as all other classifiers, or the original\none-vs-one ('ovo') decision function of libsvm which has shape\n(n_samples, n_classes * (n_classes - 1) / 2). However, one-vs-one\n('ovo') is always used as multi-class strategy. The parameter is\nignored for binary classification.\n\n.. versionchanged:: 0.19\n decision_function_shape is 'ovr' by default.\n\n.. versionadded:: 0.17\n *decision_function_shape='ovr'* is recommended.\n\n.. versionchanged:: 0.17\n Deprecated *decision_function_shape='ovo' and None*." @@ -220412,7 +219559,7 @@ "qname": "sklearn.svm._classes.NuSVC.__init__.break_ties", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "If true, ``decision_function_shape='ovr'``, and number of classes > 2,\n:term:`predict` will break ties according to the confidence values of\n:term:`decision_function`; otherwise the first class among the tied\nclasses is returned. Please note that breaking ties comes at a\nrelatively high computational cost compared to a simple predict.\n\n.. 
versionadded:: 0.22" @@ -220428,7 +219575,7 @@ "qname": "sklearn.svm._classes.NuSVC.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Controls the pseudo random number generation for shuffling the data for\nprobability estimates. Ignored when `probability` is False.\nPass an int for reproducible output across multiple function calls.\nSee :term:`Glossary `." @@ -220453,7 +219600,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -220494,7 +219641,7 @@ "qname": "sklearn.svm._classes.NuSVR.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -220507,7 +219654,7 @@ "qname": "sklearn.svm._classes.NuSVR.__init__.nu", "default_value": "0.5", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.5", "description": "An upper bound on the fraction of training errors and a lower bound of\nthe fraction of support vectors. Should be in the interval (0, 1]. By\ndefault 0.5 will be taken." @@ -220536,7 +219683,7 @@ "qname": "sklearn.svm._classes.NuSVR.__init__.C", "default_value": "1.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1.0", "description": "Penalty parameter C of the error term." @@ -220552,7 +219699,7 @@ "qname": "sklearn.svm._classes.NuSVR.__init__.kernel", "default_value": "'rbf'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'linear', 'poly', 'rbf', 'sigmoid', 'precomputed'} or callable, default='rbf'", "description": "Specifies the kernel type to be used in the algorithm.\nIf none is given, 'rbf' will be used. If a callable is given it is\nused to precompute the kernel matrix." @@ -220562,7 +219709,7 @@ "types": [ { "kind": "EnumType", - "values": ["sigmoid", "precomputed", "linear", "rbf", "poly"] + "values": ["rbf", "precomputed", "poly", "sigmoid", "linear"] }, { "kind": "NamedType", @@ -220577,7 +219724,7 @@ "qname": "sklearn.svm._classes.NuSVR.__init__.degree", "default_value": "3", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=3", "description": "Degree of the polynomial kernel function ('poly').\nIgnored by all other kernels." @@ -220593,7 +219740,7 @@ "qname": "sklearn.svm._classes.NuSVR.__init__.gamma", "default_value": "'scale'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'scale', 'auto'} or float, default='scale'", "description": "Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.\n\n- if ``gamma='scale'`` (default) is passed then it uses\n 1 / (n_features * X.var()) as value of gamma,\n- if 'auto', uses 1 / n_features.\n\n.. versionchanged:: 0.22\n The default value of ``gamma`` changed from 'auto' to 'scale'." @@ -220603,7 +219750,7 @@ "types": [ { "kind": "EnumType", - "values": ["scale", "auto"] + "values": ["auto", "scale"] }, { "kind": "NamedType", @@ -220618,7 +219765,7 @@ "qname": "sklearn.svm._classes.NuSVR.__init__.coef0", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.0", "description": "Independent term in kernel function.\nIt is only significant in 'poly' and 'sigmoid'." 
@@ -220634,7 +219781,7 @@ "qname": "sklearn.svm._classes.NuSVR.__init__.shrinking", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether to use the shrinking heuristic.\nSee the :ref:`User Guide `." @@ -220650,7 +219797,7 @@ "qname": "sklearn.svm._classes.NuSVR.__init__.tol", "default_value": "0.001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-3", "description": "Tolerance for stopping criterion." @@ -220666,7 +219813,7 @@ "qname": "sklearn.svm._classes.NuSVR.__init__.cache_size", "default_value": "200", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=200", "description": "Specify the size of the kernel cache (in MB)." @@ -220682,7 +219829,7 @@ "qname": "sklearn.svm._classes.NuSVR.__init__.verbose", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Enable verbose output. Note that this setting takes advantage of a\nper-process runtime setting in libsvm that, if enabled, may not work\nproperly in a multithreaded context." @@ -220698,7 +219845,7 @@ "qname": "sklearn.svm._classes.NuSVR.__init__.max_iter", "default_value": "-1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=-1", "description": "Hard limit on iterations within solver, or -1 for no limit." @@ -220710,7 +219857,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -220739,34 +219886,6 @@ "description": "", "docstring": null }, - { - "id": "sklearn/sklearn.svm._classes/NuSVR/class_weight_@getter", - "name": "class_weight_", - "qname": "sklearn.svm._classes.NuSVR.class_weight_", - "decorators": [ - "deprecated('Attribute `class_weight_` was deprecated in version 1.2 and will be removed in 1.4.')", - "property" - ], - "parameters": [ - { - "id": "sklearn/sklearn.svm._classes/NuSVR/class_weight_/self", - "name": "self", - "qname": "sklearn.svm._classes.NuSVR.class_weight_.self", - "default_value": null, - "assigned_by": "IMPLICIT", - "is_public": false, - "docstring": { - "type": "", - "description": "" - }, - "type": {} - } - ], - "results": [], - "is_public": false, - "description": "", - "docstring": null - }, { "id": "sklearn/sklearn.svm._classes/OneClassSVM/__init__", "name": "__init__", @@ -220779,7 +219898,7 @@ "qname": "sklearn.svm._classes.OneClassSVM.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -220792,7 +219911,7 @@ "qname": "sklearn.svm._classes.OneClassSVM.__init__.kernel", "default_value": "'rbf'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'linear', 'poly', 'rbf', 'sigmoid', 'precomputed'} or callable, default='rbf'", "description": "Specifies the kernel type to be used in the algorithm.\nIf none is given, 'rbf' will be used. If a callable is given it is\nused to precompute the kernel matrix." 
@@ -220802,7 +219921,7 @@ "types": [ { "kind": "EnumType", - "values": ["sigmoid", "precomputed", "linear", "rbf", "poly"] + "values": ["rbf", "precomputed", "poly", "sigmoid", "linear"] }, { "kind": "NamedType", @@ -220817,7 +219936,7 @@ "qname": "sklearn.svm._classes.OneClassSVM.__init__.degree", "default_value": "3", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=3", "description": "Degree of the polynomial kernel function ('poly').\nIgnored by all other kernels." @@ -220833,7 +219952,7 @@ "qname": "sklearn.svm._classes.OneClassSVM.__init__.gamma", "default_value": "'scale'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'scale', 'auto'} or float, default='scale'", "description": "Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.\n\n- if ``gamma='scale'`` (default) is passed then it uses\n 1 / (n_features * X.var()) as value of gamma,\n- if 'auto', uses 1 / n_features.\n\n.. versionchanged:: 0.22\n The default value of ``gamma`` changed from 'auto' to 'scale'." @@ -220843,7 +219962,7 @@ "types": [ { "kind": "EnumType", - "values": ["scale", "auto"] + "values": ["auto", "scale"] }, { "kind": "NamedType", @@ -220858,7 +219977,7 @@ "qname": "sklearn.svm._classes.OneClassSVM.__init__.coef0", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.0", "description": "Independent term in kernel function.\nIt is only significant in 'poly' and 'sigmoid'." @@ -220874,7 +219993,7 @@ "qname": "sklearn.svm._classes.OneClassSVM.__init__.tol", "default_value": "0.001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-3", "description": "Tolerance for stopping criterion." @@ -220890,7 +220009,7 @@ "qname": "sklearn.svm._classes.OneClassSVM.__init__.nu", "default_value": "0.5", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.5", "description": "An upper bound on the fraction of training\nerrors and a lower bound of the fraction of support\nvectors. Should be in the interval (0, 1]. By default 0.5\nwill be taken." @@ -220919,7 +220038,7 @@ "qname": "sklearn.svm._classes.OneClassSVM.__init__.shrinking", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether to use the shrinking heuristic.\nSee the :ref:`User Guide `." @@ -220935,7 +220054,7 @@ "qname": "sklearn.svm._classes.OneClassSVM.__init__.cache_size", "default_value": "200", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=200", "description": "Specify the size of the kernel cache (in MB)." @@ -220951,7 +220070,7 @@ "qname": "sklearn.svm._classes.OneClassSVM.__init__.verbose", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Enable verbose output. Note that this setting takes advantage of a\nper-process runtime setting in libsvm that, if enabled, may not work\nproperly in a multithreaded context." 
@@ -220967,7 +220086,7 @@ "qname": "sklearn.svm._classes.OneClassSVM.__init__.max_iter", "default_value": "-1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=-1", "description": "Hard limit on iterations within solver, or -1 for no limit." @@ -220979,7 +220098,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -221008,34 +220127,6 @@ "description": "", "docstring": null }, - { - "id": "sklearn/sklearn.svm._classes/OneClassSVM/class_weight_@getter", - "name": "class_weight_", - "qname": "sklearn.svm._classes.OneClassSVM.class_weight_", - "decorators": [ - "deprecated('Attribute `class_weight_` was deprecated in version 1.2 and will be removed in 1.4.')", - "property" - ], - "parameters": [ - { - "id": "sklearn/sklearn.svm._classes/OneClassSVM/class_weight_/self", - "name": "self", - "qname": "sklearn.svm._classes.OneClassSVM.class_weight_.self", - "default_value": null, - "assigned_by": "IMPLICIT", - "is_public": false, - "docstring": { - "type": "", - "description": "" - }, - "type": {} - } - ], - "results": [], - "is_public": false, - "description": "", - "docstring": null - }, { "id": "sklearn/sklearn.svm._classes/OneClassSVM/decision_function", "name": "decision_function", @@ -221048,7 +220139,7 @@ "qname": "sklearn.svm._classes.OneClassSVM.decision_function.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -221061,7 +220152,7 @@ "qname": "sklearn.svm._classes.OneClassSVM.decision_function.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "The data matrix." @@ -221073,7 +220164,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Signed distance to the separating hyperplane.\n\nSigned distance is positive for an inlier and negative for an outlier.", "docstring": "Signed distance to the separating hyperplane.\n\n Signed distance is positive for an inlier and negative for an outlier.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The data matrix.\n\n Returns\n -------\n dec : ndarray of shape (n_samples,)\n Returns the decision function of the samples.\n " }, @@ -221089,7 +220180,7 @@ "qname": "sklearn.svm._classes.OneClassSVM.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -221102,7 +220193,7 @@ "qname": "sklearn.svm._classes.OneClassSVM.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "Set of samples, where `n_samples` is the number of samples and\n`n_features` is the number of features." @@ -221127,7 +220218,7 @@ "qname": "sklearn.svm._classes.OneClassSVM.fit.y", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "Ignored", "description": "Not used, present for API consistency by convention." 
@@ -221143,7 +220234,7 @@ "qname": "sklearn.svm._classes.OneClassSVM.fit.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,), default=None", "description": "Per-sample weights. Rescale C per sample. Higher weights\nforce the classifier to put more emphasis on these points." @@ -221155,7 +220246,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Detect the soft boundary of the set of samples X.", "docstring": "Detect the soft boundary of the set of samples X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Set of samples, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Per-sample weights. Rescale C per sample. Higher weights\n force the classifier to put more emphasis on these points.\n\n **params : dict\n Additional fit parameters.\n\n .. deprecated:: 1.0\n The `fit` method will not longer accept extra keyword\n parameters in 1.2. These keyword parameters were\n already discarded.\n\n Returns\n -------\n self : object\n Fitted estimator.\n\n Notes\n -----\n If X is not a C-ordered contiguous array it is copied.\n " }, @@ -221171,7 +220262,7 @@ "qname": "sklearn.svm._classes.OneClassSVM.predict.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -221184,7 +220275,7 @@ "qname": "sklearn.svm._classes.OneClassSVM.predict.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features) or (n_samples_test, n_samples_train)", "description": "For kernel=\"precomputed\", the expected shape of X is\n(n_samples_test, n_samples_train)." @@ -221205,7 +220296,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Perform classification on samples in X.\n\nFor a one-class model, +1 or -1 is returned.", "docstring": "Perform classification on samples in X.\n\n For a one-class model, +1 or -1 is returned.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features) or (n_samples_test, n_samples_train)\n For kernel=\"precomputed\", the expected shape of X is\n (n_samples_test, n_samples_train).\n\n Returns\n -------\n y_pred : ndarray of shape (n_samples,)\n Class labels for samples in X.\n " }, @@ -221221,7 +220312,7 @@ "qname": "sklearn.svm._classes.OneClassSVM.score_samples.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -221234,7 +220325,7 @@ "qname": "sklearn.svm._classes.OneClassSVM.score_samples.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples, n_features)", "description": "The data matrix." 
@@ -221246,7 +220337,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Raw scoring function of the samples.", "docstring": "Raw scoring function of the samples.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The data matrix.\n\n Returns\n -------\n score_samples : ndarray of shape (n_samples,)\n Returns the (unshifted) scoring function of the samples.\n " }, @@ -221262,7 +220353,7 @@ "qname": "sklearn.svm._classes.SVC.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -221275,7 +220366,7 @@ "qname": "sklearn.svm._classes.SVC.__init__.C", "default_value": "1.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1.0", "description": "Regularization parameter. The strength of the regularization is\ninversely proportional to C. Must be strictly positive. The penalty\nis a squared l2 penalty." @@ -221291,7 +220382,7 @@ "qname": "sklearn.svm._classes.SVC.__init__.kernel", "default_value": "'rbf'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'linear', 'poly', 'rbf', 'sigmoid', 'precomputed'} or callable, default='rbf'", "description": "Specifies the kernel type to be used in the algorithm.\nIf none is given, 'rbf' will be used. If a callable is given it is\nused to pre-compute the kernel matrix from data matrices; that matrix\nshould be an array of shape ``(n_samples, n_samples)``." @@ -221301,7 +220392,7 @@ "types": [ { "kind": "EnumType", - "values": ["sigmoid", "precomputed", "linear", "rbf", "poly"] + "values": ["rbf", "precomputed", "poly", "sigmoid", "linear"] }, { "kind": "NamedType", @@ -221316,7 +220407,7 @@ "qname": "sklearn.svm._classes.SVC.__init__.degree", "default_value": "3", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=3", "description": "Degree of the polynomial kernel function ('poly').\nIgnored by all other kernels." @@ -221332,7 +220423,7 @@ "qname": "sklearn.svm._classes.SVC.__init__.gamma", "default_value": "'scale'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'scale', 'auto'} or float, default='scale'", "description": "Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.\n\n- if ``gamma='scale'`` (default) is passed then it uses\n 1 / (n_features * X.var()) as value of gamma,\n- if 'auto', uses 1 / n_features.\n\n.. versionchanged:: 0.22\n The default value of ``gamma`` changed from 'auto' to 'scale'." @@ -221342,7 +220433,7 @@ "types": [ { "kind": "EnumType", - "values": ["scale", "auto"] + "values": ["auto", "scale"] }, { "kind": "NamedType", @@ -221357,7 +220448,7 @@ "qname": "sklearn.svm._classes.SVC.__init__.coef0", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.0", "description": "Independent term in kernel function.\nIt is only significant in 'poly' and 'sigmoid'." @@ -221373,7 +220464,7 @@ "qname": "sklearn.svm._classes.SVC.__init__.shrinking", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether to use the shrinking heuristic.\nSee the :ref:`User Guide `." 
@@ -221389,7 +220480,7 @@ "qname": "sklearn.svm._classes.SVC.__init__.probability", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Whether to enable probability estimates. This must be enabled prior\nto calling `fit`, will slow down that method as it internally uses\n5-fold cross-validation, and `predict_proba` may be inconsistent with\n`predict`. Read more in the :ref:`User Guide `." @@ -221405,7 +220496,7 @@ "qname": "sklearn.svm._classes.SVC.__init__.tol", "default_value": "0.001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-3", "description": "Tolerance for stopping criterion." @@ -221421,7 +220512,7 @@ "qname": "sklearn.svm._classes.SVC.__init__.cache_size", "default_value": "200", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=200", "description": "Specify the size of the kernel cache (in MB)." @@ -221437,7 +220528,7 @@ "qname": "sklearn.svm._classes.SVC.__init__.class_weight", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "dict or 'balanced', default=None", "description": "Set the parameter C of class i to class_weight[i]*C for\nSVC. If not given, all classes are supposed to have\nweight one.\nThe \"balanced\" mode uses the values of y to automatically adjust\nweights inversely proportional to class frequencies in the input data\nas ``n_samples / (n_classes * np.bincount(y))``." @@ -221462,7 +220553,7 @@ "qname": "sklearn.svm._classes.SVC.__init__.verbose", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Enable verbose output. Note that this setting takes advantage of a\nper-process runtime setting in libsvm that, if enabled, may not work\nproperly in a multithreaded context." @@ -221478,7 +220569,7 @@ "qname": "sklearn.svm._classes.SVC.__init__.max_iter", "default_value": "-1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=-1", "description": "Hard limit on iterations within solver, or -1 for no limit." @@ -221494,7 +220585,7 @@ "qname": "sklearn.svm._classes.SVC.__init__.decision_function_shape", "default_value": "'ovr'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'ovo', 'ovr'}, default='ovr'", "description": "Whether to return a one-vs-rest ('ovr') decision function of shape\n(n_samples, n_classes) as all other classifiers, or the original\none-vs-one ('ovo') decision function of libsvm which has shape\n(n_samples, n_classes * (n_classes - 1) / 2). However, note that\ninternally, one-vs-one ('ovo') is always used as a multi-class strategy\nto train models; an ovr matrix is only constructed from the ovo matrix.\nThe parameter is ignored for binary classification.\n\n.. versionchanged:: 0.19\n decision_function_shape is 'ovr' by default.\n\n.. versionadded:: 0.17\n *decision_function_shape='ovr'* is recommended.\n\n.. versionchanged:: 0.17\n Deprecated *decision_function_shape='ovo' and None*." 
@@ -221510,7 +220601,7 @@ "qname": "sklearn.svm._classes.SVC.__init__.break_ties", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "If true, ``decision_function_shape='ovr'``, and number of classes > 2,\n:term:`predict` will break ties according to the confidence values of\n:term:`decision_function`; otherwise the first class among the tied\nclasses is returned. Please note that breaking ties comes at a\nrelatively high computational cost compared to a simple predict.\n\n.. versionadded:: 0.22" @@ -221526,7 +220617,7 @@ "qname": "sklearn.svm._classes.SVC.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Controls the pseudo random number generation for shuffling the data for\nprobability estimates. Ignored when `probability` is False.\nPass an int for reproducible output across multiple function calls.\nSee :term:`Glossary `." @@ -221551,7 +220642,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -221592,7 +220683,7 @@ "qname": "sklearn.svm._classes.SVR.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -221605,7 +220696,7 @@ "qname": "sklearn.svm._classes.SVR.__init__.kernel", "default_value": "'rbf'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'linear', 'poly', 'rbf', 'sigmoid', 'precomputed'} or callable, default='rbf'", "description": "Specifies the kernel type to be used in the algorithm.\nIf none is given, 'rbf' will be used. If a callable is given it is\nused to precompute the kernel matrix." @@ -221615,7 +220706,7 @@ "types": [ { "kind": "EnumType", - "values": ["sigmoid", "precomputed", "linear", "rbf", "poly"] + "values": ["rbf", "precomputed", "poly", "sigmoid", "linear"] }, { "kind": "NamedType", @@ -221630,7 +220721,7 @@ "qname": "sklearn.svm._classes.SVR.__init__.degree", "default_value": "3", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=3", "description": "Degree of the polynomial kernel function ('poly').\nIgnored by all other kernels." @@ -221646,7 +220737,7 @@ "qname": "sklearn.svm._classes.SVR.__init__.gamma", "default_value": "'scale'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{'scale', 'auto'} or float, default='scale'", "description": "Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.\n\n- if ``gamma='scale'`` (default) is passed then it uses\n 1 / (n_features * X.var()) as value of gamma,\n- if 'auto', uses 1 / n_features.\n\n.. versionchanged:: 0.22\n The default value of ``gamma`` changed from 'auto' to 'scale'." @@ -221656,7 +220747,7 @@ "types": [ { "kind": "EnumType", - "values": ["scale", "auto"] + "values": ["auto", "scale"] }, { "kind": "NamedType", @@ -221671,7 +220762,7 @@ "qname": "sklearn.svm._classes.SVR.__init__.coef0", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.0", "description": "Independent term in kernel function.\nIt is only significant in 'poly' and 'sigmoid'." 
@@ -221687,7 +220778,7 @@ "qname": "sklearn.svm._classes.SVR.__init__.tol", "default_value": "0.001", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1e-3", "description": "Tolerance for stopping criterion." @@ -221703,7 +220794,7 @@ "qname": "sklearn.svm._classes.SVR.__init__.C", "default_value": "1.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=1.0", "description": "Regularization parameter. The strength of the regularization is\ninversely proportional to C. Must be strictly positive.\nThe penalty is a squared l2 penalty." @@ -221719,7 +220810,7 @@ "qname": "sklearn.svm._classes.SVR.__init__.epsilon", "default_value": "0.1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.1", "description": "Epsilon in the epsilon-SVR model. It specifies the epsilon-tube\nwithin which no penalty is associated in the training loss function\nwith points predicted within a distance epsilon from the actual\nvalue." @@ -221735,7 +220826,7 @@ "qname": "sklearn.svm._classes.SVR.__init__.shrinking", "default_value": "True", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Whether to use the shrinking heuristic.\nSee the :ref:`User Guide `." @@ -221751,7 +220842,7 @@ "qname": "sklearn.svm._classes.SVR.__init__.cache_size", "default_value": "200", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=200", "description": "Specify the size of the kernel cache (in MB)." @@ -221767,7 +220858,7 @@ "qname": "sklearn.svm._classes.SVR.__init__.verbose", "default_value": "False", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=False", "description": "Enable verbose output. Note that this setting takes advantage of a\nper-process runtime setting in libsvm that, if enabled, may not work\nproperly in a multithreaded context." @@ -221783,7 +220874,7 @@ "qname": "sklearn.svm._classes.SVR.__init__.max_iter", "default_value": "-1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=-1", "description": "Hard limit on iterations within solver, or -1 for no limit." 
@@ -221795,7 +220886,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -221824,34 +220915,6 @@ "description": "", "docstring": null }, - { - "id": "sklearn/sklearn.svm._classes/SVR/class_weight_@getter", - "name": "class_weight_", - "qname": "sklearn.svm._classes.SVR.class_weight_", - "decorators": [ - "deprecated('Attribute `class_weight_` was deprecated in version 1.2 and will be removed in 1.4.')", - "property" - ], - "parameters": [ - { - "id": "sklearn/sklearn.svm._classes/SVR/class_weight_/self", - "name": "self", - "qname": "sklearn.svm._classes.SVR.class_weight_.self", - "default_value": null, - "assigned_by": "IMPLICIT", - "is_public": false, - "docstring": { - "type": "", - "description": "" - }, - "type": {} - } - ], - "results": [], - "is_public": false, - "description": "", - "docstring": null - }, { "id": "sklearn/sklearn.svm.setup/configuration", "name": "configuration", @@ -221902,7 +220965,7 @@ "qname": "sklearn.tree._classes.BaseDecisionTree.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -221915,7 +220978,7 @@ "qname": "sklearn.tree._classes.BaseDecisionTree.__init__.criterion", "default_value": null, "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -221928,7 +220991,7 @@ "qname": "sklearn.tree._classes.BaseDecisionTree.__init__.splitter", "default_value": null, "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -221941,7 +221004,7 @@ "qname": "sklearn.tree._classes.BaseDecisionTree.__init__.max_depth", "default_value": null, "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -221954,7 +221017,7 @@ "qname": "sklearn.tree._classes.BaseDecisionTree.__init__.min_samples_split", "default_value": null, "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -221967,7 +221030,7 @@ "qname": "sklearn.tree._classes.BaseDecisionTree.__init__.min_samples_leaf", "default_value": null, "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -221980,7 +221043,7 @@ "qname": "sklearn.tree._classes.BaseDecisionTree.__init__.min_weight_fraction_leaf", "default_value": null, "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -221993,7 +221056,7 @@ "qname": "sklearn.tree._classes.BaseDecisionTree.__init__.max_features", "default_value": null, "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -222006,7 +221069,7 @@ "qname": "sklearn.tree._classes.BaseDecisionTree.__init__.max_leaf_nodes", "default_value": null, "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -222019,7 +221082,7 @@ "qname": "sklearn.tree._classes.BaseDecisionTree.__init__.random_state", "default_value": null, "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -222032,7 +221095,7 @@ "qname": "sklearn.tree._classes.BaseDecisionTree.__init__.min_impurity_decrease", "default_value": null, "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, 
"docstring": { "type": "", "description": "" @@ -222045,7 +221108,7 @@ "qname": "sklearn.tree._classes.BaseDecisionTree.__init__.class_weight", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -222058,7 +221121,7 @@ "qname": "sklearn.tree._classes.BaseDecisionTree.__init__.ccp_alpha", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -222067,7 +221130,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -222159,7 +221222,7 @@ "qname": "sklearn.tree._classes.BaseDecisionTree.apply.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -222172,7 +221235,7 @@ "qname": "sklearn.tree._classes.BaseDecisionTree.apply.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The input samples. Internally, it will be converted to\n``dtype=np.float32`` and if a sparse matrix is provided\nto a sparse ``csr_matrix``." @@ -222197,7 +221260,7 @@ "qname": "sklearn.tree._classes.BaseDecisionTree.apply.check_input", "default_value": "True", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Allow to bypass several input checking.\nDon't use this parameter unless you know what you do." @@ -222209,7 +221272,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Return the index of the leaf that each sample is predicted as.\n\n.. versionadded:: 0.17", "docstring": "Return the index of the leaf that each sample is predicted as.\n\n .. versionadded:: 0.17\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples. Internally, it will be converted to\n ``dtype=np.float32`` and if a sparse matrix is provided\n to a sparse ``csr_matrix``.\n\n check_input : bool, default=True\n Allow to bypass several input checking.\n Don't use this parameter unless you know what you do.\n\n Returns\n -------\n X_leaves : array-like of shape (n_samples,)\n For each datapoint x in X, return the index of the leaf x\n ends up in. Leaves are numbered within\n ``[0; self.tree_.node_count)``, possibly with gaps in the\n numbering.\n " }, @@ -222225,7 +221288,7 @@ "qname": "sklearn.tree._classes.BaseDecisionTree.cost_complexity_pruning_path.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -222238,7 +221301,7 @@ "qname": "sklearn.tree._classes.BaseDecisionTree.cost_complexity_pruning_path.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The training input samples. Internally, it will be converted to\n``dtype=np.float32`` and if a sparse matrix is provided\nto a sparse ``csc_matrix``." 
@@ -222263,7 +221326,7 @@ "qname": "sklearn.tree._classes.BaseDecisionTree.cost_complexity_pruning_path.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,) or (n_samples, n_outputs)", "description": "The target values (class labels) as integers or strings." @@ -222279,7 +221342,7 @@ "qname": "sklearn.tree._classes.BaseDecisionTree.cost_complexity_pruning_path.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,), default=None", "description": "Sample weights. If None, then samples are equally weighted. Splits\nthat would create child nodes with net zero or negative weight are\nignored while searching for a split in each node. Splits are also\nignored if they would result in any single class carrying a\nnegative weight in either child node." @@ -222291,7 +221354,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Compute the pruning path during Minimal Cost-Complexity Pruning.\n\nSee :ref:`minimal_cost_complexity_pruning` for details on the pruning\nprocess.", "docstring": "Compute the pruning path during Minimal Cost-Complexity Pruning.\n\n See :ref:`minimal_cost_complexity_pruning` for details on the pruning\n process.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The training input samples. Internally, it will be converted to\n ``dtype=np.float32`` and if a sparse matrix is provided\n to a sparse ``csc_matrix``.\n\n y : array-like of shape (n_samples,) or (n_samples, n_outputs)\n The target values (class labels) as integers or strings.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights. If None, then samples are equally weighted. Splits\n that would create child nodes with net zero or negative weight are\n ignored while searching for a split in each node. Splits are also\n ignored if they would result in any single class carrying a\n negative weight in either child node.\n\n Returns\n -------\n ccp_path : :class:`~sklearn.utils.Bunch`\n Dictionary-like object, with the following attributes.\n\n ccp_alphas : ndarray\n Effective alphas of subtree during pruning.\n\n impurities : ndarray\n Sum of the impurities of the subtree leaves for the\n corresponding alpha value in ``ccp_alphas``.\n " }, @@ -222307,7 +221370,7 @@ "qname": "sklearn.tree._classes.BaseDecisionTree.decision_path.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -222320,7 +221383,7 @@ "qname": "sklearn.tree._classes.BaseDecisionTree.decision_path.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The input samples. Internally, it will be converted to\n``dtype=np.float32`` and if a sparse matrix is provided\nto a sparse ``csr_matrix``." @@ -222345,7 +221408,7 @@ "qname": "sklearn.tree._classes.BaseDecisionTree.decision_path.check_input", "default_value": "True", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Allow to bypass several input checking.\nDon't use this parameter unless you know what you do." 
@@ -222357,7 +221420,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Return the decision path in the tree.\n\n.. versionadded:: 0.18", "docstring": "Return the decision path in the tree.\n\n .. versionadded:: 0.18\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples. Internally, it will be converted to\n ``dtype=np.float32`` and if a sparse matrix is provided\n to a sparse ``csr_matrix``.\n\n check_input : bool, default=True\n Allow to bypass several input checking.\n Don't use this parameter unless you know what you do.\n\n Returns\n -------\n indicator : sparse matrix of shape (n_samples, n_nodes)\n Return a node indicator CSR matrix where non zero elements\n indicates that the samples goes through the nodes.\n " }, @@ -222373,7 +221436,7 @@ "qname": "sklearn.tree._classes.BaseDecisionTree.feature_importances_.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -222382,7 +221445,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Return the feature importances.\n\nThe importance of a feature is computed as the (normalized) total\nreduction of the criterion brought by that feature.\nIt is also known as the Gini importance.\n\nWarning: impurity-based feature importances can be misleading for\nhigh cardinality features (many unique values). See\n:func:`sklearn.inspection.permutation_importance` as an alternative.", "docstring": "Return the feature importances.\n\n The importance of a feature is computed as the (normalized) total\n reduction of the criterion brought by that feature.\n It is also known as the Gini importance.\n\n Warning: impurity-based feature importances can be misleading for\n high cardinality features (many unique values). 
See\n :func:`sklearn.inspection.permutation_importance` as an alternative.\n\n Returns\n -------\n feature_importances_ : ndarray of shape (n_features,)\n Normalized total reduction of criteria by feature\n (Gini importance).\n " }, @@ -222398,7 +221461,7 @@ "qname": "sklearn.tree._classes.BaseDecisionTree.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -222411,7 +221474,7 @@ "qname": "sklearn.tree._classes.BaseDecisionTree.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -222424,7 +221487,7 @@ "qname": "sklearn.tree._classes.BaseDecisionTree.fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -222437,7 +221500,7 @@ "qname": "sklearn.tree._classes.BaseDecisionTree.fit.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -222450,7 +221513,7 @@ "qname": "sklearn.tree._classes.BaseDecisionTree.fit.check_input", "default_value": "True", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -222459,7 +221522,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -222475,7 +221538,7 @@ "qname": "sklearn.tree._classes.BaseDecisionTree.get_depth.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -222484,7 +221547,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Return the depth of the decision tree.\n\nThe depth of a tree is the maximum distance between the root\nand any leaf.", "docstring": "Return the depth of the decision tree.\n\n The depth of a tree is the maximum distance between the root\n and any leaf.\n\n Returns\n -------\n self.tree_.max_depth : int\n The maximum depth of the tree.\n " }, @@ -222500,7 +221563,7 @@ "qname": "sklearn.tree._classes.BaseDecisionTree.get_n_leaves.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -222509,7 +221572,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Return the number of leaves of the decision tree.", "docstring": "Return the number of leaves of the decision tree.\n\n Returns\n -------\n self.tree_.n_leaves : int\n Number of leaves.\n " }, @@ -222525,7 +221588,7 @@ "qname": "sklearn.tree._classes.BaseDecisionTree.predict.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -222538,7 +221601,7 @@ "qname": "sklearn.tree._classes.BaseDecisionTree.predict.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The input samples. Internally, it will be converted to\n``dtype=np.float32`` and if a sparse matrix is provided\nto a sparse ``csr_matrix``." 
@@ -222563,7 +221626,7 @@ "qname": "sklearn.tree._classes.BaseDecisionTree.predict.check_input", "default_value": "True", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Allow to bypass several input checking.\nDon't use this parameter unless you know what you do." @@ -222575,7 +221638,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict class or regression value for X.\n\nFor a classification model, the predicted class for each sample in X is\nreturned. For a regression model, the predicted value based on X is\nreturned.", "docstring": "Predict class or regression value for X.\n\n For a classification model, the predicted class for each sample in X is\n returned. For a regression model, the predicted value based on X is\n returned.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples. Internally, it will be converted to\n ``dtype=np.float32`` and if a sparse matrix is provided\n to a sparse ``csr_matrix``.\n\n check_input : bool, default=True\n Allow to bypass several input checking.\n Don't use this parameter unless you know what you do.\n\n Returns\n -------\n y : array-like of shape (n_samples,) or (n_samples, n_outputs)\n The predicted classes, or the predict values.\n " }, @@ -222591,7 +221654,7 @@ "qname": "sklearn.tree._classes.DecisionTreeClassifier.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -222604,14 +221667,14 @@ "qname": "sklearn.tree._classes.DecisionTreeClassifier.__init__.criterion", "default_value": "'gini'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{\"gini\", \"entropy\", \"log_loss\"}, default=\"gini\"", "description": "The function to measure the quality of a split. Supported criteria are\n\"gini\" for the Gini impurity and \"log_loss\" and \"entropy\" both for the\nShannon information gain, see :ref:`tree_mathematical_formulation`." }, "type": { "kind": "EnumType", - "values": ["entropy", "log_loss", "gini"] + "values": ["gini", "entropy", "log_loss"] } }, { @@ -222620,14 +221683,14 @@ "qname": "sklearn.tree._classes.DecisionTreeClassifier.__init__.splitter", "default_value": "'best'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{\"best\", \"random\"}, default=\"best\"", "description": "The strategy used to choose the split at each node. Supported\nstrategies are \"best\" to choose the best split and \"random\" to choose\nthe best random split." }, "type": { "kind": "EnumType", - "values": ["best", "random"] + "values": ["random", "best"] } }, { @@ -222636,7 +221699,7 @@ "qname": "sklearn.tree._classes.DecisionTreeClassifier.__init__.max_depth", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "The maximum depth of the tree. If None, then nodes are expanded until\nall leaves are pure or until all leaves contain less than\nmin_samples_split samples." 
@@ -222652,7 +221715,7 @@ "qname": "sklearn.tree._classes.DecisionTreeClassifier.__init__.min_samples_split", "default_value": "2", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or float, default=2", "description": "The minimum number of samples required to split an internal node:\n\n- If int, then consider `min_samples_split` as the minimum number.\n- If float, then `min_samples_split` is a fraction and\n `ceil(min_samples_split * n_samples)` are the minimum\n number of samples for each split.\n\n.. versionchanged:: 0.18\n Added float values for fractions." @@ -222677,7 +221740,7 @@ "qname": "sklearn.tree._classes.DecisionTreeClassifier.__init__.min_samples_leaf", "default_value": "1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or float, default=1", "description": "The minimum number of samples required to be at a leaf node.\nA split point at any depth will only be considered if it leaves at\nleast ``min_samples_leaf`` training samples in each of the left and\nright branches. This may have the effect of smoothing the model,\nespecially in regression.\n\n- If int, then consider `min_samples_leaf` as the minimum number.\n- If float, then `min_samples_leaf` is a fraction and\n `ceil(min_samples_leaf * n_samples)` are the minimum\n number of samples for each node.\n\n.. versionchanged:: 0.18\n Added float values for fractions." @@ -222702,7 +221765,7 @@ "qname": "sklearn.tree._classes.DecisionTreeClassifier.__init__.min_weight_fraction_leaf", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.0", "description": "The minimum weighted fraction of the sum total of weights (of all\nthe input samples) required to be at a leaf node. Samples have\nequal weight when sample_weight is not provided." @@ -222718,7 +221781,7 @@ "qname": "sklearn.tree._classes.DecisionTreeClassifier.__init__.max_features", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, float or {\"auto\", \"sqrt\", \"log2\"}, default=None", "description": "The number of features to consider when looking for the best split:\n\n - If int, then consider `max_features` features at each split.\n - If float, then `max_features` is a fraction and\n `int(max_features * n_features)` features are considered at each\n split.\n - If \"auto\", then `max_features=sqrt(n_features)`.\n - If \"sqrt\", then `max_features=sqrt(n_features)`.\n - If \"log2\", then `max_features=log2(n_features)`.\n - If None, then `max_features=n_features`.\n\n .. deprecated:: 1.1\n The `\"auto\"` option was deprecated in 1.1 and will be removed\n in 1.3.\n\nNote: the search for a split does not stop until at least one\nvalid partition of the node samples is found, even if it requires to\neffectively inspect more than ``max_features`` features." @@ -222728,7 +221791,7 @@ "types": [ { "kind": "EnumType", - "values": ["sqrt", "log2", "auto"] + "values": ["auto", "log2", "sqrt"] }, { "kind": "NamedType", @@ -222747,7 +221810,7 @@ "qname": "sklearn.tree._classes.DecisionTreeClassifier.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Controls the randomness of the estimator. 
The features are always\nrandomly permuted at each split, even if ``splitter`` is set to\n``\"best\"``. When ``max_features < n_features``, the algorithm will\nselect ``max_features`` at random at each split before finding the best\nsplit among them. But the best found split may vary across different\nruns, even if ``max_features=n_features``. That is the case, if the\nimprovement of the criterion is identical for several splits and one\nsplit has to be selected at random. To obtain a deterministic behaviour\nduring fitting, ``random_state`` has to be fixed to an integer.\nSee :term:`Glossary ` for details." @@ -222776,7 +221839,7 @@ "qname": "sklearn.tree._classes.DecisionTreeClassifier.__init__.max_leaf_nodes", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Grow a tree with ``max_leaf_nodes`` in best-first fashion.\nBest nodes are defined as relative reduction in impurity.\nIf None then unlimited number of leaf nodes." @@ -222792,7 +221855,7 @@ "qname": "sklearn.tree._classes.DecisionTreeClassifier.__init__.min_impurity_decrease", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.0", "description": "A node will be split if this split induces a decrease of the impurity\ngreater than or equal to this value.\n\nThe weighted impurity decrease equation is the following::\n\n N_t / N * (impurity - N_t_R / N_t * right_impurity\n - N_t_L / N_t * left_impurity)\n\nwhere ``N`` is the total number of samples, ``N_t`` is the number of\nsamples at the current node, ``N_t_L`` is the number of samples in the\nleft child, and ``N_t_R`` is the number of samples in the right child.\n\n``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,\nif ``sample_weight`` is passed.\n\n.. versionadded:: 0.19" @@ -222808,7 +221871,7 @@ "qname": "sklearn.tree._classes.DecisionTreeClassifier.__init__.class_weight", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "dict, list of dict or \"balanced\", default=None", "description": "Weights associated with classes in the form ``{class_label: weight}``.\nIf None, all classes are supposed to have weight one. For\nmulti-output problems, a list of dicts can be provided in the same\norder as the columns of y.\n\nNote that for multioutput (including multilabel) weights should be\ndefined for each class of every column in its own dict. For example,\nfor four-class multilabel classification weights should be\n[{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of\n[{1:1}, {2:5}, {3:1}, {4:1}].\n\nThe \"balanced\" mode uses the values of y to automatically adjust\nweights inversely proportional to class frequencies in the input data\nas ``n_samples / (n_classes * np.bincount(y))``\n\nFor multi-output, the weights of each column of y will be multiplied.\n\nNote that these weights will be multiplied with sample_weight (passed\nthrough the fit method) if sample_weight is specified." @@ -222837,7 +221900,7 @@ "qname": "sklearn.tree._classes.DecisionTreeClassifier.__init__.ccp_alpha", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "non-negative float, default=0.0", "description": "Complexity parameter used for Minimal Cost-Complexity Pruning. 
The\nsubtree with the largest cost complexity that is smaller than\n``ccp_alpha`` will be chosen. By default, no pruning is performed. See\n:ref:`minimal_cost_complexity_pruning` for details.\n\n.. versionadded:: 0.22" @@ -222849,7 +221912,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -222890,7 +221953,7 @@ "qname": "sklearn.tree._classes.DecisionTreeClassifier.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -222903,7 +221966,7 @@ "qname": "sklearn.tree._classes.DecisionTreeClassifier.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The training input samples. Internally, it will be converted to\n``dtype=np.float32`` and if a sparse matrix is provided\nto a sparse ``csc_matrix``." @@ -222928,7 +221991,7 @@ "qname": "sklearn.tree._classes.DecisionTreeClassifier.fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,) or (n_samples, n_outputs)", "description": "The target values (class labels) as integers or strings." @@ -222944,7 +222007,7 @@ "qname": "sklearn.tree._classes.DecisionTreeClassifier.fit.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,), default=None", "description": "Sample weights. If None, then samples are equally weighted. Splits\nthat would create child nodes with net zero or negative weight are\nignored while searching for a split in each node. Splits are also\nignored if they would result in any single class carrying a\nnegative weight in either child node." @@ -222960,7 +222023,7 @@ "qname": "sklearn.tree._classes.DecisionTreeClassifier.fit.check_input", "default_value": "True", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Allow to bypass several input checking.\nDon't use this parameter unless you know what you do." @@ -222972,7 +222035,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Build a decision tree classifier from the training set (X, y).", "docstring": "Build a decision tree classifier from the training set (X, y).\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The training input samples. Internally, it will be converted to\n ``dtype=np.float32`` and if a sparse matrix is provided\n to a sparse ``csc_matrix``.\n\n y : array-like of shape (n_samples,) or (n_samples, n_outputs)\n The target values (class labels) as integers or strings.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights. If None, then samples are equally weighted. Splits\n that would create child nodes with net zero or negative weight are\n ignored while searching for a split in each node. 
Splits are also\n ignored if they would result in any single class carrying a\n negative weight in either child node.\n\n check_input : bool, default=True\n Allow to bypass several input checking.\n Don't use this parameter unless you know what you do.\n\n Returns\n -------\n self : DecisionTreeClassifier\n Fitted estimator.\n " }, @@ -222991,7 +222054,7 @@ "qname": "sklearn.tree._classes.DecisionTreeClassifier.n_features_.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -223000,7 +222063,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -223016,7 +222079,7 @@ "qname": "sklearn.tree._classes.DecisionTreeClassifier.predict_log_proba.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -223029,7 +222092,7 @@ "qname": "sklearn.tree._classes.DecisionTreeClassifier.predict_log_proba.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The input samples. Internally, it will be converted to\n``dtype=np.float32`` and if a sparse matrix is provided\nto a sparse ``csr_matrix``." @@ -223050,7 +222113,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict class log-probabilities of the input samples X.", "docstring": "Predict class log-probabilities of the input samples X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples. Internally, it will be converted to\n ``dtype=np.float32`` and if a sparse matrix is provided\n to a sparse ``csr_matrix``.\n\n Returns\n -------\n proba : ndarray of shape (n_samples, n_classes) or list of n_outputs such arrays if n_outputs > 1\n The class log-probabilities of the input samples. The order of the\n classes corresponds to that in the attribute :term:`classes_`.\n " }, @@ -223066,7 +222129,7 @@ "qname": "sklearn.tree._classes.DecisionTreeClassifier.predict_proba.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -223079,7 +222142,7 @@ "qname": "sklearn.tree._classes.DecisionTreeClassifier.predict_proba.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The input samples. Internally, it will be converted to\n``dtype=np.float32`` and if a sparse matrix is provided\nto a sparse ``csr_matrix``." @@ -223104,7 +222167,7 @@ "qname": "sklearn.tree._classes.DecisionTreeClassifier.predict_proba.check_input", "default_value": "True", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Allow to bypass several input checking.\nDon't use this parameter unless you know what you do." 
@@ -223116,7 +222179,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Predict class probabilities of the input samples X.\n\nThe predicted class probability is the fraction of samples of the same\nclass in a leaf.", "docstring": "Predict class probabilities of the input samples X.\n\n The predicted class probability is the fraction of samples of the same\n class in a leaf.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples. Internally, it will be converted to\n ``dtype=np.float32`` and if a sparse matrix is provided\n to a sparse ``csr_matrix``.\n\n check_input : bool, default=True\n Allow to bypass several input checking.\n Don't use this parameter unless you know what you do.\n\n Returns\n -------\n proba : ndarray of shape (n_samples, n_classes) or list of n_outputs such arrays if n_outputs > 1\n The class probabilities of the input samples. The order of the\n classes corresponds to that in the attribute :term:`classes_`.\n " }, @@ -223132,7 +222195,7 @@ "qname": "sklearn.tree._classes.DecisionTreeRegressor.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -223145,14 +222208,14 @@ "qname": "sklearn.tree._classes.DecisionTreeRegressor.__init__.criterion", "default_value": "'squared_error'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{\"squared_error\", \"friedman_mse\", \"absolute_error\", \"poisson\"}, default=\"squared_error\"", "description": "The function to measure the quality of a split. Supported criteria\nare \"squared_error\" for the mean squared error, which is equal to\nvariance reduction as feature selection criterion and minimizes the L2\nloss using the mean of each terminal node, \"friedman_mse\", which uses\nmean squared error with Friedman's improvement score for potential\nsplits, \"absolute_error\" for the mean absolute error, which minimizes\nthe L1 loss using the median of each terminal node, and \"poisson\" which\nuses reduction in Poisson deviance to find splits.\n\n.. versionadded:: 0.18\n Mean Absolute Error (MAE) criterion.\n\n.. versionadded:: 0.24\n Poisson deviance criterion.\n\n.. deprecated:: 1.0\n Criterion \"mse\" was deprecated in v1.0 and will be removed in\n version 1.2. Use `criterion=\"squared_error\"` which is equivalent.\n\n.. deprecated:: 1.0\n Criterion \"mae\" was deprecated in v1.0 and will be removed in\n version 1.2. Use `criterion=\"absolute_error\"` which is equivalent." }, "type": { "kind": "EnumType", - "values": ["poisson", "squared_error", "absolute_error", "friedman_mse"] + "values": ["friedman_mse", "squared_error", "poisson", "absolute_error"] } }, { @@ -223161,14 +222224,14 @@ "qname": "sklearn.tree._classes.DecisionTreeRegressor.__init__.splitter", "default_value": "'best'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{\"best\", \"random\"}, default=\"best\"", "description": "The strategy used to choose the split at each node. Supported\nstrategies are \"best\" to choose the best split and \"random\" to choose\nthe best random split." 
}, "type": { "kind": "EnumType", - "values": ["best", "random"] + "values": ["random", "best"] } }, { @@ -223177,7 +222240,7 @@ "qname": "sklearn.tree._classes.DecisionTreeRegressor.__init__.max_depth", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "The maximum depth of the tree. If None, then nodes are expanded until\nall leaves are pure or until all leaves contain less than\nmin_samples_split samples." @@ -223193,7 +222256,7 @@ "qname": "sklearn.tree._classes.DecisionTreeRegressor.__init__.min_samples_split", "default_value": "2", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or float, default=2", "description": "The minimum number of samples required to split an internal node:\n\n- If int, then consider `min_samples_split` as the minimum number.\n- If float, then `min_samples_split` is a fraction and\n `ceil(min_samples_split * n_samples)` are the minimum\n number of samples for each split.\n\n.. versionchanged:: 0.18\n Added float values for fractions." @@ -223218,7 +222281,7 @@ "qname": "sklearn.tree._classes.DecisionTreeRegressor.__init__.min_samples_leaf", "default_value": "1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or float, default=1", "description": "The minimum number of samples required to be at a leaf node.\nA split point at any depth will only be considered if it leaves at\nleast ``min_samples_leaf`` training samples in each of the left and\nright branches. This may have the effect of smoothing the model,\nespecially in regression.\n\n- If int, then consider `min_samples_leaf` as the minimum number.\n- If float, then `min_samples_leaf` is a fraction and\n `ceil(min_samples_leaf * n_samples)` are the minimum\n number of samples for each node.\n\n.. versionchanged:: 0.18\n Added float values for fractions." @@ -223243,7 +222306,7 @@ "qname": "sklearn.tree._classes.DecisionTreeRegressor.__init__.min_weight_fraction_leaf", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.0", "description": "The minimum weighted fraction of the sum total of weights (of all\nthe input samples) required to be at a leaf node. Samples have\nequal weight when sample_weight is not provided." @@ -223259,7 +222322,7 @@ "qname": "sklearn.tree._classes.DecisionTreeRegressor.__init__.max_features", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, float or {\"auto\", \"sqrt\", \"log2\"}, default=None", "description": "The number of features to consider when looking for the best split:\n\n- If int, then consider `max_features` features at each split.\n- If float, then `max_features` is a fraction and\n `int(max_features * n_features)` features are considered at each\n split.\n- If \"auto\", then `max_features=n_features`.\n- If \"sqrt\", then `max_features=sqrt(n_features)`.\n- If \"log2\", then `max_features=log2(n_features)`.\n- If None, then `max_features=n_features`.\n\n.. deprecated:: 1.1\n The `\"auto\"` option was deprecated in 1.1 and will be removed\n in 1.3.\n\nNote: the search for a split does not stop until at least one\nvalid partition of the node samples is found, even if it requires to\neffectively inspect more than ``max_features`` features." 
@@ -223269,7 +222332,7 @@ "types": [ { "kind": "EnumType", - "values": ["sqrt", "log2", "auto"] + "values": ["auto", "log2", "sqrt"] }, { "kind": "NamedType", @@ -223288,7 +222351,7 @@ "qname": "sklearn.tree._classes.DecisionTreeRegressor.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Controls the randomness of the estimator. The features are always\nrandomly permuted at each split, even if ``splitter`` is set to\n``\"best\"``. When ``max_features < n_features``, the algorithm will\nselect ``max_features`` at random at each split before finding the best\nsplit among them. But the best found split may vary across different\nruns, even if ``max_features=n_features``. That is the case, if the\nimprovement of the criterion is identical for several splits and one\nsplit has to be selected at random. To obtain a deterministic behaviour\nduring fitting, ``random_state`` has to be fixed to an integer.\nSee :term:`Glossary ` for details." @@ -223317,7 +222380,7 @@ "qname": "sklearn.tree._classes.DecisionTreeRegressor.__init__.max_leaf_nodes", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Grow a tree with ``max_leaf_nodes`` in best-first fashion.\nBest nodes are defined as relative reduction in impurity.\nIf None then unlimited number of leaf nodes." @@ -223333,7 +222396,7 @@ "qname": "sklearn.tree._classes.DecisionTreeRegressor.__init__.min_impurity_decrease", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.0", "description": "A node will be split if this split induces a decrease of the impurity\ngreater than or equal to this value.\n\nThe weighted impurity decrease equation is the following::\n\n N_t / N * (impurity - N_t_R / N_t * right_impurity\n - N_t_L / N_t * left_impurity)\n\nwhere ``N`` is the total number of samples, ``N_t`` is the number of\nsamples at the current node, ``N_t_L`` is the number of samples in the\nleft child, and ``N_t_R`` is the number of samples in the right child.\n\n``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,\nif ``sample_weight`` is passed.\n\n.. versionadded:: 0.19" @@ -223349,7 +222412,7 @@ "qname": "sklearn.tree._classes.DecisionTreeRegressor.__init__.ccp_alpha", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "non-negative float, default=0.0", "description": "Complexity parameter used for Minimal Cost-Complexity Pruning. The\nsubtree with the largest cost complexity that is smaller than\n``ccp_alpha`` will be chosen. By default, no pruning is performed. See\n:ref:`minimal_cost_complexity_pruning` for details.\n\n.. 
versionadded:: 0.22" @@ -223361,7 +222424,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -223434,7 +222497,7 @@ "qname": "sklearn.tree._classes.DecisionTreeRegressor.fit.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -223447,7 +222510,7 @@ "qname": "sklearn.tree._classes.DecisionTreeRegressor.fit.X", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "{array-like, sparse matrix} of shape (n_samples, n_features)", "description": "The training input samples. Internally, it will be converted to\n``dtype=np.float32`` and if a sparse matrix is provided\nto a sparse ``csc_matrix``." @@ -223472,7 +222535,7 @@ "qname": "sklearn.tree._classes.DecisionTreeRegressor.fit.y", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,) or (n_samples, n_outputs)", "description": "The target values (real numbers). Use ``dtype=np.float64`` and\n``order='C'`` for maximum efficiency." @@ -223488,7 +222551,7 @@ "qname": "sklearn.tree._classes.DecisionTreeRegressor.fit.sample_weight", "default_value": "None", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "array-like of shape (n_samples,), default=None", "description": "Sample weights. If None, then samples are equally weighted. Splits\nthat would create child nodes with net zero or negative weight are\nignored while searching for a split in each node." @@ -223504,7 +222567,7 @@ "qname": "sklearn.tree._classes.DecisionTreeRegressor.fit.check_input", "default_value": "True", "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "bool, default=True", "description": "Allow to bypass several input checking.\nDon't use this parameter unless you know what you do." @@ -223516,7 +222579,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "Build a decision tree regressor from the training set (X, y).", "docstring": "Build a decision tree regressor from the training set (X, y).\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The training input samples. Internally, it will be converted to\n ``dtype=np.float32`` and if a sparse matrix is provided\n to a sparse ``csc_matrix``.\n\n y : array-like of shape (n_samples,) or (n_samples, n_outputs)\n The target values (real numbers). Use ``dtype=np.float64`` and\n ``order='C'`` for maximum efficiency.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights. If None, then samples are equally weighted. 
Splits\n that would create child nodes with net zero or negative weight are\n ignored while searching for a split in each node.\n\n check_input : bool, default=True\n Allow to bypass several input checking.\n Don't use this parameter unless you know what you do.\n\n Returns\n -------\n self : DecisionTreeRegressor\n Fitted estimator.\n " }, @@ -223535,7 +222598,7 @@ "qname": "sklearn.tree._classes.DecisionTreeRegressor.n_features_.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -223544,7 +222607,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -223560,7 +222623,7 @@ "qname": "sklearn.tree._classes.ExtraTreeClassifier.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -223573,14 +222636,14 @@ "qname": "sklearn.tree._classes.ExtraTreeClassifier.__init__.criterion", "default_value": "'gini'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{\"gini\", \"entropy\", \"log_loss\"}, default=\"gini\"", "description": "The function to measure the quality of a split. Supported criteria are\n\"gini\" for the Gini impurity and \"log_loss\" and \"entropy\" both for the\nShannon information gain, see :ref:`tree_mathematical_formulation`." }, "type": { "kind": "EnumType", - "values": ["entropy", "log_loss", "gini"] + "values": ["gini", "entropy", "log_loss"] } }, { @@ -223589,14 +222652,14 @@ "qname": "sklearn.tree._classes.ExtraTreeClassifier.__init__.splitter", "default_value": "'random'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{\"random\", \"best\"}, default=\"random\"", "description": "The strategy used to choose the split at each node. Supported\nstrategies are \"best\" to choose the best split and \"random\" to choose\nthe best random split." }, "type": { "kind": "EnumType", - "values": ["best", "random"] + "values": ["random", "best"] } }, { @@ -223605,7 +222668,7 @@ "qname": "sklearn.tree._classes.ExtraTreeClassifier.__init__.max_depth", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "The maximum depth of the tree. If None, then nodes are expanded until\nall leaves are pure or until all leaves contain less than\nmin_samples_split samples." @@ -223621,7 +222684,7 @@ "qname": "sklearn.tree._classes.ExtraTreeClassifier.__init__.min_samples_split", "default_value": "2", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or float, default=2", "description": "The minimum number of samples required to split an internal node:\n\n- If int, then consider `min_samples_split` as the minimum number.\n- If float, then `min_samples_split` is a fraction and\n `ceil(min_samples_split * n_samples)` are the minimum\n number of samples for each split.\n\n.. versionchanged:: 0.18\n Added float values for fractions." 
@@ -223646,7 +222709,7 @@ "qname": "sklearn.tree._classes.ExtraTreeClassifier.__init__.min_samples_leaf", "default_value": "1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or float, default=1", "description": "The minimum number of samples required to be at a leaf node.\nA split point at any depth will only be considered if it leaves at\nleast ``min_samples_leaf`` training samples in each of the left and\nright branches. This may have the effect of smoothing the model,\nespecially in regression.\n\n- If int, then consider `min_samples_leaf` as the minimum number.\n- If float, then `min_samples_leaf` is a fraction and\n `ceil(min_samples_leaf * n_samples)` are the minimum\n number of samples for each node.\n\n.. versionchanged:: 0.18\n Added float values for fractions." @@ -223671,7 +222734,7 @@ "qname": "sklearn.tree._classes.ExtraTreeClassifier.__init__.min_weight_fraction_leaf", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.0", "description": "The minimum weighted fraction of the sum total of weights (of all\nthe input samples) required to be at a leaf node. Samples have\nequal weight when sample_weight is not provided." @@ -223687,7 +222750,7 @@ "qname": "sklearn.tree._classes.ExtraTreeClassifier.__init__.max_features", "default_value": "'sqrt'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, float, {\"auto\", \"sqrt\", \"log2\"} or None, default=\"sqrt\"", "description": "The number of features to consider when looking for the best split:\n\n - If int, then consider `max_features` features at each split.\n - If float, then `max_features` is a fraction and\n `int(max_features * n_features)` features are considered at each\n split.\n - If \"auto\", then `max_features=sqrt(n_features)`.\n - If \"sqrt\", then `max_features=sqrt(n_features)`.\n - If \"log2\", then `max_features=log2(n_features)`.\n - If None, then `max_features=n_features`.\n\n .. versionchanged:: 1.1\n The default of `max_features` changed from `\"auto\"` to `\"sqrt\"`.\n\n .. deprecated:: 1.1\n The `\"auto\"` option was deprecated in 1.1 and will be removed\n in 1.3.\n\nNote: the search for a split does not stop until at least one\nvalid partition of the node samples is found, even if it requires to\neffectively inspect more than ``max_features`` features." @@ -223697,7 +222760,7 @@ "types": [ { "kind": "EnumType", - "values": ["sqrt", "log2", "auto"] + "values": ["auto", "log2", "sqrt"] }, { "kind": "NamedType", @@ -223720,7 +222783,7 @@ "qname": "sklearn.tree._classes.ExtraTreeClassifier.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Used to pick randomly the `max_features` used at each split.\nSee :term:`Glossary ` for details." @@ -223749,7 +222812,7 @@ "qname": "sklearn.tree._classes.ExtraTreeClassifier.__init__.max_leaf_nodes", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Grow a tree with ``max_leaf_nodes`` in best-first fashion.\nBest nodes are defined as relative reduction in impurity.\nIf None then unlimited number of leaf nodes." 
@@ -223765,7 +222828,7 @@ "qname": "sklearn.tree._classes.ExtraTreeClassifier.__init__.min_impurity_decrease", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.0", "description": "A node will be split if this split induces a decrease of the impurity\ngreater than or equal to this value.\n\nThe weighted impurity decrease equation is the following::\n\n N_t / N * (impurity - N_t_R / N_t * right_impurity\n - N_t_L / N_t * left_impurity)\n\nwhere ``N`` is the total number of samples, ``N_t`` is the number of\nsamples at the current node, ``N_t_L`` is the number of samples in the\nleft child, and ``N_t_R`` is the number of samples in the right child.\n\n``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,\nif ``sample_weight`` is passed.\n\n.. versionadded:: 0.19" @@ -223781,7 +222844,7 @@ "qname": "sklearn.tree._classes.ExtraTreeClassifier.__init__.class_weight", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "dict, list of dict or \"balanced\", default=None", "description": "Weights associated with classes in the form ``{class_label: weight}``.\nIf None, all classes are supposed to have weight one. For\nmulti-output problems, a list of dicts can be provided in the same\norder as the columns of y.\n\nNote that for multioutput (including multilabel) weights should be\ndefined for each class of every column in its own dict. For example,\nfor four-class multilabel classification weights should be\n[{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of\n[{1:1}, {2:5}, {3:1}, {4:1}].\n\nThe \"balanced\" mode uses the values of y to automatically adjust\nweights inversely proportional to class frequencies in the input data\nas ``n_samples / (n_classes * np.bincount(y))``\n\nFor multi-output, the weights of each column of y will be multiplied.\n\nNote that these weights will be multiplied with sample_weight (passed\nthrough the fit method) if sample_weight is specified." @@ -223810,7 +222873,7 @@ "qname": "sklearn.tree._classes.ExtraTreeClassifier.__init__.ccp_alpha", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "non-negative float, default=0.0", "description": "Complexity parameter used for Minimal Cost-Complexity Pruning. The\nsubtree with the largest cost complexity that is smaller than\n``ccp_alpha`` will be chosen. By default, no pruning is performed. See\n:ref:`minimal_cost_complexity_pruning` for details.\n\n.. versionadded:: 0.22" @@ -223822,7 +222885,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -223838,7 +222901,7 @@ "qname": "sklearn.tree._classes.ExtraTreeRegressor.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -223851,14 +222914,14 @@ "qname": "sklearn.tree._classes.ExtraTreeRegressor.__init__.criterion", "default_value": "'squared_error'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{\"squared_error\", \"friedman_mse\"}, default=\"squared_error\"", "description": "The function to measure the quality of a split. Supported criteria\nare \"squared_error\" for the mean squared error, which is equal to\nvariance reduction as feature selection criterion and \"mae\" for the\nmean absolute error.\n\n.. 
versionadded:: 0.18\n Mean Absolute Error (MAE) criterion.\n\n.. versionadded:: 0.24\n Poisson deviance criterion.\n\n.. deprecated:: 1.0\n Criterion \"mse\" was deprecated in v1.0 and will be removed in\n version 1.2. Use `criterion=\"squared_error\"` which is equivalent.\n\n.. deprecated:: 1.0\n Criterion \"mae\" was deprecated in v1.0 and will be removed in\n version 1.2. Use `criterion=\"absolute_error\"` which is equivalent." }, "type": { "kind": "EnumType", - "values": ["squared_error", "friedman_mse"] + "values": ["friedman_mse", "squared_error"] } }, { @@ -223867,14 +222930,14 @@ "qname": "sklearn.tree._classes.ExtraTreeRegressor.__init__.splitter", "default_value": "'random'", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "{\"random\", \"best\"}, default=\"random\"", "description": "The strategy used to choose the split at each node. Supported\nstrategies are \"best\" to choose the best split and \"random\" to choose\nthe best random split." }, "type": { "kind": "EnumType", - "values": ["best", "random"] + "values": ["random", "best"] } }, { @@ -223883,7 +222946,7 @@ "qname": "sklearn.tree._classes.ExtraTreeRegressor.__init__.max_depth", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "The maximum depth of the tree. If None, then nodes are expanded until\nall leaves are pure or until all leaves contain less than\nmin_samples_split samples." @@ -223899,7 +222962,7 @@ "qname": "sklearn.tree._classes.ExtraTreeRegressor.__init__.min_samples_split", "default_value": "2", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or float, default=2", "description": "The minimum number of samples required to split an internal node:\n\n- If int, then consider `min_samples_split` as the minimum number.\n- If float, then `min_samples_split` is a fraction and\n `ceil(min_samples_split * n_samples)` are the minimum\n number of samples for each split.\n\n.. versionchanged:: 0.18\n Added float values for fractions." @@ -223924,7 +222987,7 @@ "qname": "sklearn.tree._classes.ExtraTreeRegressor.__init__.min_samples_leaf", "default_value": "1", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int or float, default=1", "description": "The minimum number of samples required to be at a leaf node.\nA split point at any depth will only be considered if it leaves at\nleast ``min_samples_leaf`` training samples in each of the left and\nright branches. This may have the effect of smoothing the model,\nespecially in regression.\n\n- If int, then consider `min_samples_leaf` as the minimum number.\n- If float, then `min_samples_leaf` is a fraction and\n `ceil(min_samples_leaf * n_samples)` are the minimum\n number of samples for each node.\n\n.. versionchanged:: 0.18\n Added float values for fractions." @@ -223949,7 +223012,7 @@ "qname": "sklearn.tree._classes.ExtraTreeRegressor.__init__.min_weight_fraction_leaf", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.0", "description": "The minimum weighted fraction of the sum total of weights (of all\nthe input samples) required to be at a leaf node. Samples have\nequal weight when sample_weight is not provided." 
@@ -223965,7 +223028,7 @@ "qname": "sklearn.tree._classes.ExtraTreeRegressor.__init__.max_features", "default_value": "1.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, float, {\"auto\", \"sqrt\", \"log2\"} or None, default=1.0", "description": "The number of features to consider when looking for the best split:\n\n- If int, then consider `max_features` features at each split.\n- If float, then `max_features` is a fraction and\n `int(max_features * n_features)` features are considered at each\n split.\n- If \"auto\", then `max_features=n_features`.\n- If \"sqrt\", then `max_features=sqrt(n_features)`.\n- If \"log2\", then `max_features=log2(n_features)`.\n- If None, then `max_features=n_features`.\n\n.. versionchanged:: 1.1\n The default of `max_features` changed from `\"auto\"` to `1.0`.\n\n.. deprecated:: 1.1\n The `\"auto\"` option was deprecated in 1.1 and will be removed\n in 1.3.\n\nNote: the search for a split does not stop until at least one\nvalid partition of the node samples is found, even if it requires to\neffectively inspect more than ``max_features`` features." @@ -223975,7 +223038,7 @@ "types": [ { "kind": "EnumType", - "values": ["sqrt", "log2", "auto"] + "values": ["auto", "log2", "sqrt"] }, { "kind": "NamedType", @@ -223998,7 +223061,7 @@ "qname": "sklearn.tree._classes.ExtraTreeRegressor.__init__.random_state", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, RandomState instance or None, default=None", "description": "Used to pick randomly the `max_features` used at each split.\nSee :term:`Glossary ` for details." @@ -224027,7 +223090,7 @@ "qname": "sklearn.tree._classes.ExtraTreeRegressor.__init__.min_impurity_decrease", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "float, default=0.0", "description": "A node will be split if this split induces a decrease of the impurity\ngreater than or equal to this value.\n\nThe weighted impurity decrease equation is the following::\n\n N_t / N * (impurity - N_t_R / N_t * right_impurity\n - N_t_L / N_t * left_impurity)\n\nwhere ``N`` is the total number of samples, ``N_t`` is the number of\nsamples at the current node, ``N_t_L`` is the number of samples in the\nleft child, and ``N_t_R`` is the number of samples in the right child.\n\n``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,\nif ``sample_weight`` is passed.\n\n.. versionadded:: 0.19" @@ -224043,7 +223106,7 @@ "qname": "sklearn.tree._classes.ExtraTreeRegressor.__init__.max_leaf_nodes", "default_value": "None", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "int, default=None", "description": "Grow a tree with ``max_leaf_nodes`` in best-first fashion.\nBest nodes are defined as relative reduction in impurity.\nIf None then unlimited number of leaf nodes." @@ -224059,7 +223122,7 @@ "qname": "sklearn.tree._classes.ExtraTreeRegressor.__init__.ccp_alpha", "default_value": "0.0", "assigned_by": "NAME_ONLY", - "is_public": false, + "is_public": true, "docstring": { "type": "non-negative float, default=0.0", "description": "Complexity parameter used for Minimal Cost-Complexity Pruning. The\nsubtree with the largest cost complexity that is smaller than\n``ccp_alpha`` will be chosen. By default, no pruning is performed. See\n:ref:`minimal_cost_complexity_pruning` for details.\n\n.. 
versionadded:: 0.22" @@ -224071,7 +223134,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -225401,7 +224464,7 @@ }, "type": { "kind": "EnumType", - "values": ["none", "root", "all"] + "values": ["root", "all", "none"] } }, { @@ -225779,7 +224842,7 @@ }, "type": { "kind": "EnumType", - "values": ["none", "root", "all"] + "values": ["root", "all", "none"] } }, { @@ -226669,7 +225732,7 @@ "qname": "sklearn.utils._bunch.Bunch.__dir__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -226678,7 +225741,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -226694,7 +225757,7 @@ "qname": "sklearn.utils._bunch.Bunch.__getattr__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -226707,7 +225770,7 @@ "qname": "sklearn.utils._bunch.Bunch.__getattr__.key", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -226716,7 +225779,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -226732,7 +225795,7 @@ "qname": "sklearn.utils._bunch.Bunch.__init__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -226741,7 +225804,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -226757,7 +225820,7 @@ "qname": "sklearn.utils._bunch.Bunch.__setattr__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -226770,7 +225833,7 @@ "qname": "sklearn.utils._bunch.Bunch.__setattr__.key", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -226783,7 +225846,7 @@ "qname": "sklearn.utils._bunch.Bunch.__setattr__.value", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -226792,7 +225855,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -226808,7 +225871,7 @@ "qname": "sklearn.utils._bunch.Bunch.__setstate__.self", "default_value": null, "assigned_by": "IMPLICIT", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -226821,7 +225884,7 @@ "qname": "sklearn.utils._bunch.Bunch.__setstate__.state", "default_value": null, "assigned_by": "POSITION_OR_NAME", - "is_public": false, + "is_public": true, "docstring": { "type": "", "description": "" @@ -226830,7 +225893,7 @@ } ], "results": [], - "is_public": false, + "is_public": true, "description": "", "docstring": null }, @@ -227527,7 +226590,7 @@ }, "type": { "kind": "EnumType", - "values": ["parallel", "serial", "single"] + "values": ["single", "serial", "parallel"] } }, { @@ -228960,1108 +228023,6 @@ "description": "", "docstring": null }, - { - "id": "sklearn/sklearn.utils._param_validation/Interval/__contains__", - "name": "__contains__", - "qname": "sklearn.utils._param_validation.Interval.__contains__", - "decorators": [], - "parameters": [ - { - "id": 
"sklearn/sklearn.utils._param_validation/Interval/__contains__/self", - "name": "self", - "qname": "sklearn.utils._param_validation.Interval.__contains__.self", - "default_value": null, - "assigned_by": "IMPLICIT", - "is_public": false, - "docstring": { - "type": "", - "description": "" - }, - "type": {} - }, - { - "id": "sklearn/sklearn.utils._param_validation/Interval/__contains__/val", - "name": "val", - "qname": "sklearn.utils._param_validation.Interval.__contains__.val", - "default_value": null, - "assigned_by": "POSITION_OR_NAME", - "is_public": false, - "docstring": { - "type": "", - "description": "" - }, - "type": {} - } - ], - "results": [], - "is_public": false, - "description": "", - "docstring": null - }, - { - "id": "sklearn/sklearn.utils._param_validation/Interval/__init__", - "name": "__init__", - "qname": "sklearn.utils._param_validation.Interval.__init__", - "decorators": [ - "validate_params({'type': [type], 'left': [Integral, Real, None], 'right': [Integral, Real, None], 'closed': [StrOptions({'left', 'right', 'both', 'neither'})]})" - ], - "parameters": [ - { - "id": "sklearn/sklearn.utils._param_validation/Interval/__init__/self", - "name": "self", - "qname": "sklearn.utils._param_validation.Interval.__init__.self", - "default_value": null, - "assigned_by": "IMPLICIT", - "is_public": false, - "docstring": { - "type": "", - "description": "" - }, - "type": {} - }, - { - "id": "sklearn/sklearn.utils._param_validation/Interval/__init__/type", - "name": "type", - "qname": "sklearn.utils._param_validation.Interval.__init__.type", - "default_value": null, - "assigned_by": "POSITION_OR_NAME", - "is_public": false, - "docstring": { - "type": "{numbers.Integral, numbers.Real}", - "description": "The set of numbers in which to set the interval." - }, - "type": { - "kind": "EnumType", - "values": [] - } - }, - { - "id": "sklearn/sklearn.utils._param_validation/Interval/__init__/left", - "name": "left", - "qname": "sklearn.utils._param_validation.Interval.__init__.left", - "default_value": null, - "assigned_by": "POSITION_OR_NAME", - "is_public": false, - "docstring": { - "type": "float or int or None", - "description": "The left bound of the interval. None means left bound is -\u221e." - }, - "type": { - "kind": "UnionType", - "types": [ - { - "kind": "NamedType", - "name": "float" - }, - { - "kind": "NamedType", - "name": "int" - }, - { - "kind": "NamedType", - "name": "None" - } - ] - } - }, - { - "id": "sklearn/sklearn.utils._param_validation/Interval/__init__/right", - "name": "right", - "qname": "sklearn.utils._param_validation.Interval.__init__.right", - "default_value": null, - "assigned_by": "POSITION_OR_NAME", - "is_public": false, - "docstring": { - "type": "float, int or None", - "description": "The right bound of the interval. None means right bound is +\u221e." - }, - "type": { - "kind": "UnionType", - "types": [ - { - "kind": "NamedType", - "name": "float" - }, - { - "kind": "NamedType", - "name": "int" - }, - { - "kind": "NamedType", - "name": "None" - } - ] - } - }, - { - "id": "sklearn/sklearn.utils._param_validation/Interval/__init__/closed", - "name": "closed", - "qname": "sklearn.utils._param_validation.Interval.__init__.closed", - "default_value": null, - "assigned_by": "NAME_ONLY", - "is_public": false, - "docstring": { - "type": "{\"left\", \"right\", \"both\", \"neither\"}", - "description": "Whether the interval is open or closed. 
Possible choices are:\n\n- `\"left\"`: the interval is closed on the left and open on the right.\n It is equivalent to the interval `[ left, right )`.\n- `\"right\"`: the interval is closed on the right and open on the left.\n It is equivalent to the interval `( left, right ]`.\n- `\"both\"`: the interval is closed.\n It is equivalent to the interval `[ left, right ]`.\n- `\"neither\"`: the interval is open.\n It is equivalent to the interval `( left, right )`." - }, - "type": { - "kind": "EnumType", - "values": ["both", "left", "right", "neither"] - } - } - ], - "results": [], - "is_public": false, - "description": "", - "docstring": null - }, - { - "id": "sklearn/sklearn.utils._param_validation/Interval/__str__", - "name": "__str__", - "qname": "sklearn.utils._param_validation.Interval.__str__", - "decorators": [], - "parameters": [ - { - "id": "sklearn/sklearn.utils._param_validation/Interval/__str__/self", - "name": "self", - "qname": "sklearn.utils._param_validation.Interval.__str__.self", - "default_value": null, - "assigned_by": "IMPLICIT", - "is_public": false, - "docstring": { - "type": "", - "description": "" - }, - "type": {} - } - ], - "results": [], - "is_public": false, - "description": "", - "docstring": null - }, - { - "id": "sklearn/sklearn.utils._param_validation/Interval/_check_params", - "name": "_check_params", - "qname": "sklearn.utils._param_validation.Interval._check_params", - "decorators": [], - "parameters": [ - { - "id": "sklearn/sklearn.utils._param_validation/Interval/_check_params/self", - "name": "self", - "qname": "sklearn.utils._param_validation.Interval._check_params.self", - "default_value": null, - "assigned_by": "IMPLICIT", - "is_public": false, - "docstring": { - "type": "", - "description": "" - }, - "type": {} - } - ], - "results": [], - "is_public": false, - "description": "", - "docstring": null - }, - { - "id": "sklearn/sklearn.utils._param_validation/Interval/is_satisfied_by", - "name": "is_satisfied_by", - "qname": "sklearn.utils._param_validation.Interval.is_satisfied_by", - "decorators": [], - "parameters": [ - { - "id": "sklearn/sklearn.utils._param_validation/Interval/is_satisfied_by/self", - "name": "self", - "qname": "sklearn.utils._param_validation.Interval.is_satisfied_by.self", - "default_value": null, - "assigned_by": "IMPLICIT", - "is_public": false, - "docstring": { - "type": "", - "description": "" - }, - "type": {} - }, - { - "id": "sklearn/sklearn.utils._param_validation/Interval/is_satisfied_by/val", - "name": "val", - "qname": "sklearn.utils._param_validation.Interval.is_satisfied_by.val", - "default_value": null, - "assigned_by": "POSITION_OR_NAME", - "is_public": false, - "docstring": { - "type": "", - "description": "" - }, - "type": {} - } - ], - "results": [], - "is_public": false, - "description": "", - "docstring": null - }, - { - "id": "sklearn/sklearn.utils._param_validation/StrOptions/__init__", - "name": "__init__", - "qname": "sklearn.utils._param_validation.StrOptions.__init__", - "decorators": ["validate_params({'options': [set], 'deprecated': [set, None]})"], - "parameters": [ - { - "id": "sklearn/sklearn.utils._param_validation/StrOptions/__init__/self", - "name": "self", - "qname": "sklearn.utils._param_validation.StrOptions.__init__.self", - "default_value": null, - "assigned_by": "IMPLICIT", - "is_public": false, - "docstring": { - "type": "", - "description": "" - }, - "type": {} - }, - { - "id": "sklearn/sklearn.utils._param_validation/StrOptions/__init__/options", - "name": "options", - "qname": 
"sklearn.utils._param_validation.StrOptions.__init__.options", - "default_value": null, - "assigned_by": "POSITION_OR_NAME", - "is_public": false, - "docstring": { - "type": "set of str", - "description": "The set of valid strings." - }, - "type": { - "kind": "NamedType", - "name": "set of str" - } - }, - { - "id": "sklearn/sklearn.utils._param_validation/StrOptions/__init__/deprecated", - "name": "deprecated", - "qname": "sklearn.utils._param_validation.StrOptions.__init__.deprecated", - "default_value": "None", - "assigned_by": "POSITION_OR_NAME", - "is_public": false, - "docstring": { - "type": "set of str or None, default=None", - "description": "A subset of the `options` to mark as deprecated in the repr of the constraint." - }, - "type": { - "kind": "UnionType", - "types": [ - { - "kind": "NamedType", - "name": "set of str" - }, - { - "kind": "NamedType", - "name": "None" - } - ] - } - } - ], - "results": [], - "is_public": false, - "description": "", - "docstring": null - }, - { - "id": "sklearn/sklearn.utils._param_validation/StrOptions/__str__", - "name": "__str__", - "qname": "sklearn.utils._param_validation.StrOptions.__str__", - "decorators": [], - "parameters": [ - { - "id": "sklearn/sklearn.utils._param_validation/StrOptions/__str__/self", - "name": "self", - "qname": "sklearn.utils._param_validation.StrOptions.__str__.self", - "default_value": null, - "assigned_by": "IMPLICIT", - "is_public": false, - "docstring": { - "type": "", - "description": "" - }, - "type": {} - } - ], - "results": [], - "is_public": false, - "description": "", - "docstring": null - }, - { - "id": "sklearn/sklearn.utils._param_validation/StrOptions/_mark_if_deprecated", - "name": "_mark_if_deprecated", - "qname": "sklearn.utils._param_validation.StrOptions._mark_if_deprecated", - "decorators": [], - "parameters": [ - { - "id": "sklearn/sklearn.utils._param_validation/StrOptions/_mark_if_deprecated/self", - "name": "self", - "qname": "sklearn.utils._param_validation.StrOptions._mark_if_deprecated.self", - "default_value": null, - "assigned_by": "IMPLICIT", - "is_public": false, - "docstring": { - "type": "", - "description": "" - }, - "type": {} - }, - { - "id": "sklearn/sklearn.utils._param_validation/StrOptions/_mark_if_deprecated/option", - "name": "option", - "qname": "sklearn.utils._param_validation.StrOptions._mark_if_deprecated.option", - "default_value": null, - "assigned_by": "POSITION_OR_NAME", - "is_public": false, - "docstring": { - "type": "", - "description": "" - }, - "type": {} - } - ], - "results": [], - "is_public": false, - "description": "Add a deprecated mark to an option if needed.", - "docstring": "Add a deprecated mark to an option if needed." 
- }, - { - "id": "sklearn/sklearn.utils._param_validation/StrOptions/is_satisfied_by", - "name": "is_satisfied_by", - "qname": "sklearn.utils._param_validation.StrOptions.is_satisfied_by", - "decorators": [], - "parameters": [ - { - "id": "sklearn/sklearn.utils._param_validation/StrOptions/is_satisfied_by/self", - "name": "self", - "qname": "sklearn.utils._param_validation.StrOptions.is_satisfied_by.self", - "default_value": null, - "assigned_by": "IMPLICIT", - "is_public": false, - "docstring": { - "type": "", - "description": "" - }, - "type": {} - }, - { - "id": "sklearn/sklearn.utils._param_validation/StrOptions/is_satisfied_by/val", - "name": "val", - "qname": "sklearn.utils._param_validation.StrOptions.is_satisfied_by.val", - "default_value": null, - "assigned_by": "POSITION_OR_NAME", - "is_public": false, - "docstring": { - "type": "", - "description": "" - }, - "type": {} - } - ], - "results": [], - "is_public": false, - "description": "", - "docstring": null - }, - { - "id": "sklearn/sklearn.utils._param_validation/_ArrayLikes/__str__", - "name": "__str__", - "qname": "sklearn.utils._param_validation._ArrayLikes.__str__", - "decorators": [], - "parameters": [ - { - "id": "sklearn/sklearn.utils._param_validation/_ArrayLikes/__str__/self", - "name": "self", - "qname": "sklearn.utils._param_validation._ArrayLikes.__str__.self", - "default_value": null, - "assigned_by": "IMPLICIT", - "is_public": false, - "docstring": { - "type": "", - "description": "" - }, - "type": {} - } - ], - "results": [], - "is_public": false, - "description": "", - "docstring": null - }, - { - "id": "sklearn/sklearn.utils._param_validation/_ArrayLikes/is_satisfied_by", - "name": "is_satisfied_by", - "qname": "sklearn.utils._param_validation._ArrayLikes.is_satisfied_by", - "decorators": [], - "parameters": [ - { - "id": "sklearn/sklearn.utils._param_validation/_ArrayLikes/is_satisfied_by/self", - "name": "self", - "qname": "sklearn.utils._param_validation._ArrayLikes.is_satisfied_by.self", - "default_value": null, - "assigned_by": "IMPLICIT", - "is_public": false, - "docstring": { - "type": "", - "description": "" - }, - "type": {} - }, - { - "id": "sklearn/sklearn.utils._param_validation/_ArrayLikes/is_satisfied_by/val", - "name": "val", - "qname": "sklearn.utils._param_validation._ArrayLikes.is_satisfied_by.val", - "default_value": null, - "assigned_by": "POSITION_OR_NAME", - "is_public": false, - "docstring": { - "type": "", - "description": "" - }, - "type": {} - } - ], - "results": [], - "is_public": false, - "description": "", - "docstring": null - }, - { - "id": "sklearn/sklearn.utils._param_validation/_Callables/__str__", - "name": "__str__", - "qname": "sklearn.utils._param_validation._Callables.__str__", - "decorators": [], - "parameters": [ - { - "id": "sklearn/sklearn.utils._param_validation/_Callables/__str__/self", - "name": "self", - "qname": "sklearn.utils._param_validation._Callables.__str__.self", - "default_value": null, - "assigned_by": "IMPLICIT", - "is_public": false, - "docstring": { - "type": "", - "description": "" - }, - "type": {} - } - ], - "results": [], - "is_public": false, - "description": "", - "docstring": null - }, - { - "id": "sklearn/sklearn.utils._param_validation/_Callables/is_satisfied_by", - "name": "is_satisfied_by", - "qname": "sklearn.utils._param_validation._Callables.is_satisfied_by", - "decorators": [], - "parameters": [ - { - "id": "sklearn/sklearn.utils._param_validation/_Callables/is_satisfied_by/self", - "name": "self", - "qname": 
"sklearn.utils._param_validation._Callables.is_satisfied_by.self", - "default_value": null, - "assigned_by": "IMPLICIT", - "is_public": false, - "docstring": { - "type": "", - "description": "" - }, - "type": {} - }, - { - "id": "sklearn/sklearn.utils._param_validation/_Callables/is_satisfied_by/val", - "name": "val", - "qname": "sklearn.utils._param_validation._Callables.is_satisfied_by.val", - "default_value": null, - "assigned_by": "POSITION_OR_NAME", - "is_public": false, - "docstring": { - "type": "", - "description": "" - }, - "type": {} - } - ], - "results": [], - "is_public": false, - "description": "", - "docstring": null - }, - { - "id": "sklearn/sklearn.utils._param_validation/_Constraint/__str__", - "name": "__str__", - "qname": "sklearn.utils._param_validation._Constraint.__str__", - "decorators": ["abstractmethod"], - "parameters": [ - { - "id": "sklearn/sklearn.utils._param_validation/_Constraint/__str__/self", - "name": "self", - "qname": "sklearn.utils._param_validation._Constraint.__str__.self", - "default_value": null, - "assigned_by": "IMPLICIT", - "is_public": false, - "docstring": { - "type": "", - "description": "" - }, - "type": {} - } - ], - "results": [], - "is_public": false, - "description": "A human readable representational string of the constraint.", - "docstring": "A human readable representational string of the constraint." - }, - { - "id": "sklearn/sklearn.utils._param_validation/_Constraint/is_satisfied_by", - "name": "is_satisfied_by", - "qname": "sklearn.utils._param_validation._Constraint.is_satisfied_by", - "decorators": ["abstractmethod"], - "parameters": [ - { - "id": "sklearn/sklearn.utils._param_validation/_Constraint/is_satisfied_by/self", - "name": "self", - "qname": "sklearn.utils._param_validation._Constraint.is_satisfied_by.self", - "default_value": null, - "assigned_by": "IMPLICIT", - "is_public": false, - "docstring": { - "type": "", - "description": "" - }, - "type": {} - }, - { - "id": "sklearn/sklearn.utils._param_validation/_Constraint/is_satisfied_by/val", - "name": "val", - "qname": "sklearn.utils._param_validation._Constraint.is_satisfied_by.val", - "default_value": null, - "assigned_by": "POSITION_OR_NAME", - "is_public": false, - "docstring": { - "type": "object", - "description": "The value to check." 
- }, - "type": { - "kind": "NamedType", - "name": "object" - } - } - ], - "results": [], - "is_public": false, - "description": "Whether or not a value satisfies the constraint.", - "docstring": "Whether or not a value satisfies the constraint.\n\n Parameters\n ----------\n val : object\n The value to check.\n\n Returns\n -------\n is_satisfied : bool\n Whether or not the constraint is satisfied by this value.\n " - }, - { - "id": "sklearn/sklearn.utils._param_validation/_InstancesOf/__init__", - "name": "__init__", - "qname": "sklearn.utils._param_validation._InstancesOf.__init__", - "decorators": [], - "parameters": [ - { - "id": "sklearn/sklearn.utils._param_validation/_InstancesOf/__init__/self", - "name": "self", - "qname": "sklearn.utils._param_validation._InstancesOf.__init__.self", - "default_value": null, - "assigned_by": "IMPLICIT", - "is_public": false, - "docstring": { - "type": "", - "description": "" - }, - "type": {} - }, - { - "id": "sklearn/sklearn.utils._param_validation/_InstancesOf/__init__/type", - "name": "type", - "qname": "sklearn.utils._param_validation._InstancesOf.__init__.type", - "default_value": null, - "assigned_by": "POSITION_OR_NAME", - "is_public": false, - "docstring": { - "type": "type", - "description": "The valid type." - }, - "type": { - "kind": "NamedType", - "name": "type" - } - } - ], - "results": [], - "is_public": false, - "description": "", - "docstring": null - }, - { - "id": "sklearn/sklearn.utils._param_validation/_InstancesOf/__str__", - "name": "__str__", - "qname": "sklearn.utils._param_validation._InstancesOf.__str__", - "decorators": [], - "parameters": [ - { - "id": "sklearn/sklearn.utils._param_validation/_InstancesOf/__str__/self", - "name": "self", - "qname": "sklearn.utils._param_validation._InstancesOf.__str__.self", - "default_value": null, - "assigned_by": "IMPLICIT", - "is_public": false, - "docstring": { - "type": "", - "description": "" - }, - "type": {} - } - ], - "results": [], - "is_public": false, - "description": "", - "docstring": null - }, - { - "id": "sklearn/sklearn.utils._param_validation/_InstancesOf/_type_name", - "name": "_type_name", - "qname": "sklearn.utils._param_validation._InstancesOf._type_name", - "decorators": [], - "parameters": [ - { - "id": "sklearn/sklearn.utils._param_validation/_InstancesOf/_type_name/self", - "name": "self", - "qname": "sklearn.utils._param_validation._InstancesOf._type_name.self", - "default_value": null, - "assigned_by": "IMPLICIT", - "is_public": false, - "docstring": { - "type": "", - "description": "" - }, - "type": {} - }, - { - "id": "sklearn/sklearn.utils._param_validation/_InstancesOf/_type_name/t", - "name": "t", - "qname": "sklearn.utils._param_validation._InstancesOf._type_name.t", - "default_value": null, - "assigned_by": "POSITION_OR_NAME", - "is_public": false, - "docstring": { - "type": "", - "description": "" - }, - "type": {} - } - ], - "results": [], - "is_public": false, - "description": "Convert type into human readable string.", - "docstring": "Convert type into human readable string." 
- }, - { - "id": "sklearn/sklearn.utils._param_validation/_InstancesOf/is_satisfied_by", - "name": "is_satisfied_by", - "qname": "sklearn.utils._param_validation._InstancesOf.is_satisfied_by", - "decorators": [], - "parameters": [ - { - "id": "sklearn/sklearn.utils._param_validation/_InstancesOf/is_satisfied_by/self", - "name": "self", - "qname": "sklearn.utils._param_validation._InstancesOf.is_satisfied_by.self", - "default_value": null, - "assigned_by": "IMPLICIT", - "is_public": false, - "docstring": { - "type": "", - "description": "" - }, - "type": {} - }, - { - "id": "sklearn/sklearn.utils._param_validation/_InstancesOf/is_satisfied_by/val", - "name": "val", - "qname": "sklearn.utils._param_validation._InstancesOf.is_satisfied_by.val", - "default_value": null, - "assigned_by": "POSITION_OR_NAME", - "is_public": false, - "docstring": { - "type": "", - "description": "" - }, - "type": {} - } - ], - "results": [], - "is_public": false, - "description": "", - "docstring": null - }, - { - "id": "sklearn/sklearn.utils._param_validation/_NoneConstraint/__str__", - "name": "__str__", - "qname": "sklearn.utils._param_validation._NoneConstraint.__str__", - "decorators": [], - "parameters": [ - { - "id": "sklearn/sklearn.utils._param_validation/_NoneConstraint/__str__/self", - "name": "self", - "qname": "sklearn.utils._param_validation._NoneConstraint.__str__.self", - "default_value": null, - "assigned_by": "IMPLICIT", - "is_public": false, - "docstring": { - "type": "", - "description": "" - }, - "type": {} - } - ], - "results": [], - "is_public": false, - "description": "", - "docstring": null - }, - { - "id": "sklearn/sklearn.utils._param_validation/_NoneConstraint/is_satisfied_by", - "name": "is_satisfied_by", - "qname": "sklearn.utils._param_validation._NoneConstraint.is_satisfied_by", - "decorators": [], - "parameters": [ - { - "id": "sklearn/sklearn.utils._param_validation/_NoneConstraint/is_satisfied_by/self", - "name": "self", - "qname": "sklearn.utils._param_validation._NoneConstraint.is_satisfied_by.self", - "default_value": null, - "assigned_by": "IMPLICIT", - "is_public": false, - "docstring": { - "type": "", - "description": "" - }, - "type": {} - }, - { - "id": "sklearn/sklearn.utils._param_validation/_NoneConstraint/is_satisfied_by/val", - "name": "val", - "qname": "sklearn.utils._param_validation._NoneConstraint.is_satisfied_by.val", - "default_value": null, - "assigned_by": "POSITION_OR_NAME", - "is_public": false, - "docstring": { - "type": "", - "description": "" - }, - "type": {} - } - ], - "results": [], - "is_public": false, - "description": "", - "docstring": null - }, - { - "id": "sklearn/sklearn.utils._param_validation/_RandomStates/__init__", - "name": "__init__", - "qname": "sklearn.utils._param_validation._RandomStates.__init__", - "decorators": [], - "parameters": [ - { - "id": "sklearn/sklearn.utils._param_validation/_RandomStates/__init__/self", - "name": "self", - "qname": "sklearn.utils._param_validation._RandomStates.__init__.self", - "default_value": null, - "assigned_by": "IMPLICIT", - "is_public": false, - "docstring": { - "type": "", - "description": "" - }, - "type": {} - } - ], - "results": [], - "is_public": false, - "description": "", - "docstring": null - }, - { - "id": "sklearn/sklearn.utils._param_validation/_RandomStates/__str__", - "name": "__str__", - "qname": "sklearn.utils._param_validation._RandomStates.__str__", - "decorators": [], - "parameters": [ - { - "id": "sklearn/sklearn.utils._param_validation/_RandomStates/__str__/self", - "name": 
"self", - "qname": "sklearn.utils._param_validation._RandomStates.__str__.self", - "default_value": null, - "assigned_by": "IMPLICIT", - "is_public": false, - "docstring": { - "type": "", - "description": "" - }, - "type": {} - } - ], - "results": [], - "is_public": false, - "description": "", - "docstring": null - }, - { - "id": "sklearn/sklearn.utils._param_validation/_RandomStates/is_satisfied_by", - "name": "is_satisfied_by", - "qname": "sklearn.utils._param_validation._RandomStates.is_satisfied_by", - "decorators": [], - "parameters": [ - { - "id": "sklearn/sklearn.utils._param_validation/_RandomStates/is_satisfied_by/self", - "name": "self", - "qname": "sklearn.utils._param_validation._RandomStates.is_satisfied_by.self", - "default_value": null, - "assigned_by": "IMPLICIT", - "is_public": false, - "docstring": { - "type": "", - "description": "" - }, - "type": {} - }, - { - "id": "sklearn/sklearn.utils._param_validation/_RandomStates/is_satisfied_by/val", - "name": "val", - "qname": "sklearn.utils._param_validation._RandomStates.is_satisfied_by.val", - "default_value": null, - "assigned_by": "POSITION_OR_NAME", - "is_public": false, - "docstring": { - "type": "", - "description": "" - }, - "type": {} - } - ], - "results": [], - "is_public": false, - "description": "", - "docstring": null - }, - { - "id": "sklearn/sklearn.utils._param_validation/_SparseMatrices/__str__", - "name": "__str__", - "qname": "sklearn.utils._param_validation._SparseMatrices.__str__", - "decorators": [], - "parameters": [ - { - "id": "sklearn/sklearn.utils._param_validation/_SparseMatrices/__str__/self", - "name": "self", - "qname": "sklearn.utils._param_validation._SparseMatrices.__str__.self", - "default_value": null, - "assigned_by": "IMPLICIT", - "is_public": false, - "docstring": { - "type": "", - "description": "" - }, - "type": {} - } - ], - "results": [], - "is_public": false, - "description": "", - "docstring": null - }, - { - "id": "sklearn/sklearn.utils._param_validation/_SparseMatrices/is_satisfied_by", - "name": "is_satisfied_by", - "qname": "sklearn.utils._param_validation._SparseMatrices.is_satisfied_by", - "decorators": [], - "parameters": [ - { - "id": "sklearn/sklearn.utils._param_validation/_SparseMatrices/is_satisfied_by/self", - "name": "self", - "qname": "sklearn.utils._param_validation._SparseMatrices.is_satisfied_by.self", - "default_value": null, - "assigned_by": "IMPLICIT", - "is_public": false, - "docstring": { - "type": "", - "description": "" - }, - "type": {} - }, - { - "id": "sklearn/sklearn.utils._param_validation/_SparseMatrices/is_satisfied_by/val", - "name": "val", - "qname": "sklearn.utils._param_validation._SparseMatrices.is_satisfied_by.val", - "default_value": null, - "assigned_by": "POSITION_OR_NAME", - "is_public": false, - "docstring": { - "type": "", - "description": "" - }, - "type": {} - } - ], - "results": [], - "is_public": false, - "description": "", - "docstring": null - }, - { - "id": "sklearn/sklearn.utils._param_validation/generate_invalid_param_val", - "name": "generate_invalid_param_val", - "qname": "sklearn.utils._param_validation.generate_invalid_param_val", - "decorators": [], - "parameters": [ - { - "id": "sklearn/sklearn.utils._param_validation/generate_invalid_param_val/constraint", - "name": "constraint", - "qname": "sklearn.utils._param_validation.generate_invalid_param_val.constraint", - "default_value": null, - "assigned_by": "POSITION_OR_NAME", - "is_public": false, - "docstring": { - "type": "Constraint", - "description": "The constraint to 
generate a value for." - }, - "type": { - "kind": "NamedType", - "name": "Constraint" - } - } - ], - "results": [], - "is_public": false, - "description": "Return a value that does not satisfy the constraint.\n\nThis is only useful for testing purpose.", - "docstring": "Return a value that does not satisfy the constraint.\n\n This is only useful for testing purpose.\n\n Parameters\n ----------\n constraint : Constraint\n The constraint to generate a value for.\n\n Returns\n -------\n val : object\n A value that does not satisfy the constraint.\n " - }, - { - "id": "sklearn/sklearn.utils._param_validation/make_constraint", - "name": "make_constraint", - "qname": "sklearn.utils._param_validation.make_constraint", - "decorators": [], - "parameters": [ - { - "id": "sklearn/sklearn.utils._param_validation/make_constraint/constraint", - "name": "constraint", - "qname": "sklearn.utils._param_validation.make_constraint.constraint", - "default_value": null, - "assigned_by": "POSITION_OR_NAME", - "is_public": false, - "docstring": { - "type": "object", - "description": "The constraint to convert." - }, - "type": { - "kind": "NamedType", - "name": "object" - } - } - ], - "results": [], - "is_public": false, - "description": "Convert the constraint into the appropriate Constraint object.", - "docstring": "Convert the constraint into the appropriate Constraint object.\n\n Parameters\n ----------\n constraint : object\n The constraint to convert.\n\n Returns\n -------\n constraint : instance of _Constraint\n The converted constraint.\n " - }, - { - "id": "sklearn/sklearn.utils._param_validation/validate_parameter_constraints", - "name": "validate_parameter_constraints", - "qname": "sklearn.utils._param_validation.validate_parameter_constraints", - "decorators": [], - "parameters": [ - { - "id": "sklearn/sklearn.utils._param_validation/validate_parameter_constraints/parameter_constraints", - "name": "parameter_constraints", - "qname": "sklearn.utils._param_validation.validate_parameter_constraints.parameter_constraints", - "default_value": null, - "assigned_by": "POSITION_OR_NAME", - "is_public": false, - "docstring": { - "type": "dict", - "description": "A dictionary `param_name: list of constraints`. A parameter is valid if it\nsatisfies one of the constraints from the list. Constraints can be:\n- an Interval object, representing a continuous or discrete range of numbers\n- the string \"array-like\"\n- the string \"sparse matrix\"\n- the string \"random state\"\n- callable\n- None, meaning that None is a valid value for the parameter\n- any type, meaning that any instance of this type is valid\n- a StrOptions object, representing a set of strings" - }, - "type": { - "kind": "NamedType", - "name": "dict" - } - }, - { - "id": "sklearn/sklearn.utils._param_validation/validate_parameter_constraints/params", - "name": "params", - "qname": "sklearn.utils._param_validation.validate_parameter_constraints.params", - "default_value": null, - "assigned_by": "POSITION_OR_NAME", - "is_public": false, - "docstring": { - "type": "dict", - "description": "A dictionary `param_name: param_value`. The parameters to validate against the\nconstraints." 
- }, - "type": { - "kind": "NamedType", - "name": "dict" - } - }, - { - "id": "sklearn/sklearn.utils._param_validation/validate_parameter_constraints/caller_name", - "name": "caller_name", - "qname": "sklearn.utils._param_validation.validate_parameter_constraints.caller_name", - "default_value": null, - "assigned_by": "POSITION_OR_NAME", - "is_public": false, - "docstring": { - "type": "str", - "description": "The name of the estimator or function or method that called this function." - }, - "type": { - "kind": "NamedType", - "name": "str" - } - } - ], - "results": [], - "is_public": false, - "description": "Validate types and values of given parameters.", - "docstring": "Validate types and values of given parameters.\n\n Parameters\n ----------\n parameter_constraints : dict\n A dictionary `param_name: list of constraints`. A parameter is valid if it\n satisfies one of the constraints from the list. Constraints can be:\n - an Interval object, representing a continuous or discrete range of numbers\n - the string \"array-like\"\n - the string \"sparse matrix\"\n - the string \"random state\"\n - callable\n - None, meaning that None is a valid value for the parameter\n - any type, meaning that any instance of this type is valid\n - a StrOptions object, representing a set of strings\n\n params : dict\n A dictionary `param_name: param_value`. The parameters to validate against the\n constraints.\n\n caller_name : str\n The name of the estimator or function or method that called this function.\n " - }, - { - "id": "sklearn/sklearn.utils._param_validation/validate_params", - "name": "validate_params", - "qname": "sklearn.utils._param_validation.validate_params", - "decorators": [], - "parameters": [ - { - "id": "sklearn/sklearn.utils._param_validation/validate_params/parameter_constraints", - "name": "parameter_constraints", - "qname": "sklearn.utils._param_validation.validate_params.parameter_constraints", - "default_value": null, - "assigned_by": "POSITION_OR_NAME", - "is_public": false, - "docstring": { - "type": "dict", - "description": "A dictionary `param_name: list of constraints`. See the docstring of\n`validate_parameter_constraints` for a description of the accepted constraints." - }, - "type": { - "kind": "NamedType", - "name": "dict" - } - } - ], - "results": [], - "is_public": false, - "description": "Decorator to validate types and values of functions and methods.", - "docstring": "Decorator to validate types and values of functions and methods.\n\n Parameters\n ----------\n parameter_constraints : dict\n A dictionary `param_name: list of constraints`. 
See the docstring of\n `validate_parameter_constraints` for a description of the accepted constraints.\n\n Returns\n -------\n decorated_function : function or method\n The decorated function.\n " - }, { "id": "sklearn/sklearn.utils._pprint/KeyValTuple/__repr__", "name": "__repr__", @@ -232338,16 +230299,16 @@ "type": { "kind": "EnumType", "values": [ + "index", "array", - "sparse_csc", "series", + "sparse_csr", "tuple", "slice", - "dataframe", - "index", "list", + "dataframe", "sparse", - "sparse_csr" + "sparse_csc" ] } }, @@ -236946,44 +234907,6 @@ "description": "", "docstring": null }, - { - "id": "sklearn/sklearn.utils.estimator_checks/check_param_validation", - "name": "check_param_validation", - "qname": "sklearn.utils.estimator_checks.check_param_validation", - "decorators": [], - "parameters": [ - { - "id": "sklearn/sklearn.utils.estimator_checks/check_param_validation/name", - "name": "name", - "qname": "sklearn.utils.estimator_checks.check_param_validation.name", - "default_value": null, - "assigned_by": "POSITION_OR_NAME", - "is_public": true, - "docstring": { - "type": "", - "description": "" - }, - "type": {} - }, - { - "id": "sklearn/sklearn.utils.estimator_checks/check_param_validation/estimator_orig", - "name": "estimator_orig", - "qname": "sklearn.utils.estimator_checks.check_param_validation.estimator_orig", - "default_value": null, - "assigned_by": "POSITION_OR_NAME", - "is_public": true, - "docstring": { - "type": "", - "description": "" - }, - "type": {} - } - ], - "results": [], - "is_public": true, - "description": "", - "docstring": null - }, { "id": "sklearn/sklearn.utils.estimator_checks/check_parameters_default_constructible", "name": "check_parameters_default_constructible", @@ -238244,7 +236167,7 @@ }, "type": { "kind": "EnumType", - "values": ["none", "auto", "QR", "LU"] + "values": ["auto", "none", "LU", "QR"] } }, { @@ -238260,7 +236183,7 @@ }, "type": { "kind": "EnumType", - "values": ["module", "value"] + "values": ["value", "module"] } }, { @@ -238593,7 +236516,7 @@ }, "type": { "kind": "EnumType", - "values": ["none", "auto", "QR", "LU"] + "values": ["auto", "none", "LU", "QR"] } }, { @@ -238678,7 +236601,7 @@ "is_public": true, "docstring": { "type": "int, default=10", - "description": "Additional number of random vectors to sample the range of M so as\nto ensure proper conditioning. The total number of random vectors\nused to find the range of M is n_components + n_oversamples. Smaller\nnumber can improve speed but can negatively impact the quality of\napproximation of singular vectors and singular values. Users might wish\nto increase this parameter up to `2*k - n_components` where k is the\neffective rank, for large matrices, noisy problems, matrices with\nslowly decaying spectrums, or to increase precision accuracy. See [1]_\n(pages 5, 23 and 26)." + "description": "Additional number of random vectors to sample the range of M so as\nto ensure proper conditioning. The total number of random vectors\nused to find the range of M is n_components + n_oversamples. Smaller\nnumber can improve speed but can negatively impact the quality of\napproximation of singular vectors and singular values. Users might wish\nto increase this parameter up to `2*k - n_components` where k is the\neffective rank, for large matrices, noisy problems, matrices with\nslowly decaying spectrums, or to increase precision accuracy. See Halko\net al (pages 5, 23 and 26)." 
}, "type": { "kind": "NamedType", @@ -238694,7 +236617,7 @@ "is_public": true, "docstring": { "type": "int or 'auto', default='auto'", - "description": "Number of power iterations. It can be used to deal with very noisy\nproblems. When 'auto', it is set to 4, unless `n_components` is small\n(< .1 * min(X.shape)) in which case `n_iter` is set to 7.\nThis improves precision with few components. Note that in general\nusers should rather increase `n_oversamples` before increasing `n_iter`\nas the principle of the randomized method is to avoid usage of these\nmore costly power iterations steps. When `n_components` is equal\nor greater to the effective matrix rank and the spectrum does not\npresent a slow decay, `n_iter=0` or `1` should even work fine in theory\n(see [1]_ page 9).\n\n.. versionchanged:: 0.18" + "description": "Number of power iterations. It can be used to deal with very noisy\nproblems. When 'auto', it is set to 4, unless `n_components` is small\n(< .1 * min(X.shape)) in which case `n_iter` is set to 7.\nThis improves precision with few components. Note that in general\nusers should rather increase `n_oversamples` before increasing `n_iter`\nas the principle of the randomized method is to avoid usage of these\nmore costly power iterations steps. When `n_components` is equal\nor greater to the effective matrix rank and the spectrum does not\npresent a slow decay, `n_iter=0` or `1` should even work fine in theory\n(see Halko et al paper, page 9).\n\n.. versionchanged:: 0.18" }, "type": { "kind": "UnionType", @@ -238723,7 +236646,7 @@ }, "type": { "kind": "EnumType", - "values": ["none", "auto", "QR", "LU"] + "values": ["auto", "none", "LU", "QR"] } }, { @@ -238795,28 +236718,12 @@ } ] } - }, - { - "id": "sklearn/sklearn.utils.extmath/randomized_svd/svd_lapack_driver", - "name": "svd_lapack_driver", - "qname": "sklearn.utils.extmath.randomized_svd.svd_lapack_driver", - "default_value": "'gesdd'", - "assigned_by": "NAME_ONLY", - "is_public": true, - "docstring": { - "type": "{\"gesdd\", \"gesvd\"}, default=\"gesdd\"", - "description": "Whether to use the more efficient divide-and-conquer approach\n(`\"gesdd\"`) or more general rectangular approach (`\"gesvd\"`) to compute\nthe SVD of the matrix B, which is the projection of M into a low\ndimensional subspace, as described in [1]_.\n\n.. versionadded:: 1.2" - }, - "type": { - "kind": "EnumType", - "values": ["gesdd", "gesvd"] - } } ], "results": [], "is_public": true, - "description": "Computes a truncated randomized SVD.\n\nThis method solves the fixed-rank approximation problem described in [1]_\n(problem (1.5), p5).", - "docstring": "Computes a truncated randomized SVD.\n\n This method solves the fixed-rank approximation problem described in [1]_\n (problem (1.5), p5).\n\n Parameters\n ----------\n M : {ndarray, sparse matrix}\n Matrix to decompose.\n\n n_components : int\n Number of singular values and vectors to extract.\n\n n_oversamples : int, default=10\n Additional number of random vectors to sample the range of M so as\n to ensure proper conditioning. The total number of random vectors\n used to find the range of M is n_components + n_oversamples. Smaller\n number can improve speed but can negatively impact the quality of\n approximation of singular vectors and singular values. Users might wish\n to increase this parameter up to `2*k - n_components` where k is the\n effective rank, for large matrices, noisy problems, matrices with\n slowly decaying spectrums, or to increase precision accuracy. 
See [1]_\n (pages 5, 23 and 26).\n\n n_iter : int or 'auto', default='auto'\n Number of power iterations. It can be used to deal with very noisy\n problems. When 'auto', it is set to 4, unless `n_components` is small\n (< .1 * min(X.shape)) in which case `n_iter` is set to 7.\n This improves precision with few components. Note that in general\n users should rather increase `n_oversamples` before increasing `n_iter`\n as the principle of the randomized method is to avoid usage of these\n more costly power iterations steps. When `n_components` is equal\n or greater to the effective matrix rank and the spectrum does not\n present a slow decay, `n_iter=0` or `1` should even work fine in theory\n (see [1]_ page 9).\n\n .. versionchanged:: 0.18\n\n power_iteration_normalizer : {'auto', 'QR', 'LU', 'none'}, default='auto'\n Whether the power iterations are normalized with step-by-step\n QR factorization (the slowest but most accurate), 'none'\n (the fastest but numerically unstable when `n_iter` is large, e.g.\n typically 5 or larger), or 'LU' factorization (numerically stable\n but can lose slightly in accuracy). The 'auto' mode applies no\n normalization if `n_iter` <= 2 and switches to LU otherwise.\n\n .. versionadded:: 0.18\n\n transpose : bool or 'auto', default='auto'\n Whether the algorithm should be applied to M.T instead of M. The\n result should approximately be the same. The 'auto' mode will\n trigger the transposition if M.shape[1] > M.shape[0] since this\n implementation of randomized SVD tend to be a little faster in that\n case.\n\n .. versionchanged:: 0.18\n\n flip_sign : bool, default=True\n The output of a singular value decomposition is only unique up to a\n permutation of the signs of the singular vectors. If `flip_sign` is\n set to `True`, the sign ambiguity is resolved by making the largest\n loadings for each component in the left singular vectors positive.\n\n random_state : int, RandomState instance or None, default='warn'\n The seed of the pseudo random number generator to use when\n shuffling the data, i.e. getting the random vectors to initialize\n the algorithm. Pass an int for reproducible results across multiple\n function calls. See :term:`Glossary `.\n\n .. versionchanged:: 1.2\n The previous behavior (`random_state=0`) is deprecated, and\n from v1.2 the default value will be `random_state=None`. Set\n the value of `random_state` explicitly to suppress the deprecation\n warning.\n\n svd_lapack_driver : {\"gesdd\", \"gesvd\"}, default=\"gesdd\"\n Whether to use the more efficient divide-and-conquer approach\n (`\"gesdd\"`) or more general rectangular approach (`\"gesvd\"`) to compute\n the SVD of the matrix B, which is the projection of M into a low\n dimensional subspace, as described in [1]_.\n\n .. versionadded:: 1.2\n\n Notes\n -----\n This algorithm finds a (usually very good) approximate truncated\n singular value decomposition using randomization to speed up the\n computations. It is particularly fast on large matrices on which\n you wish to extract only a small number of components. In order to\n obtain further speed up, `n_iter` can be set <=2 (at the cost of\n loss of precision). To increase the precision it is recommended to\n increase `n_oversamples`, up to `2*k-n_components` where k is the\n effective rank. Usually, `n_components` is chosen to be greater than k\n so increasing `n_oversamples` up to `n_components` should be enough.\n\n References\n ----------\n .. 
[1] :arxiv:`\"Finding structure with randomness:\n Stochastic algorithms for constructing approximate matrix decompositions\"\n <0909.4061>`\n Halko, et al. (2009)\n\n .. [2] A randomized algorithm for the decomposition of matrices\n Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert\n\n .. [3] An implementation of a randomized algorithm for principal component\n analysis A. Szlam et al. 2014\n " + "description": "Computes a truncated randomized SVD.\n\nThis method solves the fixed-rank approximation problem described in the\nHalko et al paper (problem (1.5), p5).", + "docstring": "Computes a truncated randomized SVD.\n\n This method solves the fixed-rank approximation problem described in the\n Halko et al paper (problem (1.5), p5).\n\n Parameters\n ----------\n M : {ndarray, sparse matrix}\n Matrix to decompose.\n\n n_components : int\n Number of singular values and vectors to extract.\n\n n_oversamples : int, default=10\n Additional number of random vectors to sample the range of M so as\n to ensure proper conditioning. The total number of random vectors\n used to find the range of M is n_components + n_oversamples. Smaller\n number can improve speed but can negatively impact the quality of\n approximation of singular vectors and singular values. Users might wish\n to increase this parameter up to `2*k - n_components` where k is the\n effective rank, for large matrices, noisy problems, matrices with\n slowly decaying spectrums, or to increase precision accuracy. See Halko\n et al (pages 5, 23 and 26).\n\n n_iter : int or 'auto', default='auto'\n Number of power iterations. It can be used to deal with very noisy\n problems. When 'auto', it is set to 4, unless `n_components` is small\n (< .1 * min(X.shape)) in which case `n_iter` is set to 7.\n This improves precision with few components. Note that in general\n users should rather increase `n_oversamples` before increasing `n_iter`\n as the principle of the randomized method is to avoid usage of these\n more costly power iterations steps. When `n_components` is equal\n or greater to the effective matrix rank and the spectrum does not\n present a slow decay, `n_iter=0` or `1` should even work fine in theory\n (see Halko et al paper, page 9).\n\n .. versionchanged:: 0.18\n\n power_iteration_normalizer : {'auto', 'QR', 'LU', 'none'}, default='auto'\n Whether the power iterations are normalized with step-by-step\n QR factorization (the slowest but most accurate), 'none'\n (the fastest but numerically unstable when `n_iter` is large, e.g.\n typically 5 or larger), or 'LU' factorization (numerically stable\n but can lose slightly in accuracy). The 'auto' mode applies no\n normalization if `n_iter` <= 2 and switches to LU otherwise.\n\n .. versionadded:: 0.18\n\n transpose : bool or 'auto', default='auto'\n Whether the algorithm should be applied to M.T instead of M. The\n result should approximately be the same. The 'auto' mode will\n trigger the transposition if M.shape[1] > M.shape[0] since this\n implementation of randomized SVD tend to be a little faster in that\n case.\n\n .. versionchanged:: 0.18\n\n flip_sign : bool, default=True\n The output of a singular value decomposition is only unique up to a\n permutation of the signs of the singular vectors. 
If `flip_sign` is\n set to `True`, the sign ambiguity is resolved by making the largest\n loadings for each component in the left singular vectors positive.\n\n random_state : int, RandomState instance or None, default='warn'\n The seed of the pseudo random number generator to use when\n shuffling the data, i.e. getting the random vectors to initialize\n the algorithm. Pass an int for reproducible results across multiple\n function calls. See :term:`Glossary `.\n\n .. versionchanged:: 1.2\n The previous behavior (`random_state=0`) is deprecated, and\n from v1.2 the default value will be `random_state=None`. Set\n the value of `random_state` explicitly to suppress the deprecation\n warning.\n\n Notes\n -----\n This algorithm finds a (usually very good) approximate truncated\n singular value decomposition using randomization to speed up the\n computations. It is particularly fast on large matrices on which\n you wish to extract only a small number of components. In order to\n obtain further speed up, `n_iter` can be set <=2 (at the cost of\n loss of precision). To increase the precision it is recommended to\n increase `n_oversamples`, up to `2*k-n_components` where k is the\n effective rank. Usually, `n_components` is chosen to be greater than k\n so increasing `n_oversamples` up to `n_components` should be enough.\n\n References\n ----------\n * :arxiv:`\"Finding structure with randomness:\n Stochastic algorithms for constructing approximate matrix decompositions\"\n <0909.4061>`\n Halko, et al. (2009)\n\n * A randomized algorithm for the decomposition of matrices\n Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert\n\n * An implementation of a randomized algorithm for principal component\n analysis\n A. Szlam et al. 2014\n " }, { "id": "sklearn/sklearn.utils.extmath/row_norms", @@ -239588,7 +237495,7 @@ }, "type": { "kind": "EnumType", - "values": ["FW", "auto", "D"] + "values": ["auto", "FW", "D"] } } ], @@ -242062,8 +239969,8 @@ ], "results": [], "is_public": true, - "description": "Swap two columns of a CSC/CSR matrix in-place.", - "docstring": "\n Swap two columns of a CSC/CSR matrix in-place.\n\n Parameters\n ----------\n X : sparse matrix of shape (n_samples, n_features)\n Matrix whose two columns are to be swapped. It should be of\n CSR or CSC format.\n\n m : int\n Index of the column of X to be swapped.\n\n n : int\n Index of the column of X to be swapped.\n " + "description": "Swaps two columns of a CSC/CSR matrix in-place.", + "docstring": "\n Swaps two columns of a CSC/CSR matrix in-place.\n\n Parameters\n ----------\n X : sparse matrix of shape (n_samples, n_features)\n Matrix whose two columns are to be swapped. 
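The randomized_svd entries above document its main tuning knobs (n_components, n_oversamples, n_iter, power_iteration_normalizer, flip_sign, random_state). A small usage sketch; random_state is passed explicitly because, per the deprecation note in the docstring, its default is changing around v1.2:

    # Usage sketch for sklearn.utils.extmath.randomized_svd as documented above.
    import numpy as np
    from sklearn.utils.extmath import randomized_svd

    rng = np.random.RandomState(0)
    M = rng.rand(200, 80)                      # matrix to decompose

    # Extract the 10 leading singular triplets.
    U, S, Vt = randomized_svd(M, n_components=10, n_oversamples=10,
                              n_iter="auto", random_state=0)
    print(U.shape, S.shape, Vt.shape)          # (200, 10) (10,) (10, 80)

    # For noisy problems or slowly decaying spectra, the docstring recommends
    # raising n_oversamples first, up to 2*k - n_components where k is the
    # effective rank: with k = 25 and n_components = 10 that is 40.
    U, S, Vt = randomized_svd(M, n_components=10, n_oversamples=40, random_state=0)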
It should be of\n CSR or CSC format.\n\n m : int\n Index of the column of X to be swapped.\n\n n : int\n Index of the column of X to be swapped.\n " }, { "id": "sklearn/sklearn.utils.sparsefuncs/inplace_swap_row", @@ -243775,7 +241682,7 @@ }, "type": { "kind": "EnumType", - "values": ["C", "F"] + "values": ["F", "C"] } }, { @@ -244062,7 +241969,7 @@ "types": [ { "kind": "EnumType", - "values": ["C", "F"] + "values": ["F", "C"] }, { "kind": "NamedType", @@ -244584,7 +242491,7 @@ }, "type": { "kind": "EnumType", - "values": ["both", "left", "right", "neither"] + "values": ["both", "left", "neither", "right"] } } ], @@ -245435,7 +243342,7 @@ "types": [ { "kind": "EnumType", - "values": ["regressor", "cluster", "classifier", "transformer"] + "values": ["cluster", "classifier", "transformer", "regressor"] }, { "kind": "NamedType", diff --git a/package-parser/package_parser/processing/annotations/_usages_preprocessor.py b/package-parser/package_parser/processing/annotations/_usages_preprocessor.py index aa0cd4ee1..698e1b189 100644 --- a/package-parser/package_parser/processing/annotations/_usages_preprocessor.py +++ b/package-parser/package_parser/processing/annotations/_usages_preprocessor.py @@ -1,50 +1,13 @@ -import logging - from package_parser.model.api import API from package_parser.model.usages import UsageCountStore from package_parser.utils import parent_id def _preprocess_usages(usages: UsageCountStore, api: API) -> None: - _remove_internal_usages(usages, api) _add_unused_api_elements(usages, api) _add_implicit_usages_of_default_value(usages, api) -def _remove_internal_usages(usages: UsageCountStore, api: API) -> None: - """ - Removes usages of internal parts of the API. It might incorrectly remove some calls to methods that are inherited - from internal classes into a public class but these are just fit/predict/etc., i.e. something we want to keep - unchanged anyway. - - :param usages: Usage store - :param api: Description of the API - """ - - # Internal classes - for class_id in list(usages.class_usages.keys()): - if not api.is_public_class(class_id): - logging.info(f"Removing usages of internal class {class_id}") - usages.remove_class(class_id) - - # Internal functions - for function_id in list(usages.function_usages.keys()): - if not api.is_public_function(function_id): - logging.info(f"Removing usages of internal function {function_id}") - usages.remove_function(function_id) - - # Internal parameters - parameter_ids_in_api = set(api.parameters().keys()) - - for parameter_id in list(usages.parameter_usages.keys()): - function_id = parent_id(parameter_id) - if parameter_id not in parameter_ids_in_api or not api.is_public_function( - function_id - ): - logging.info(f"Removing usages of internal parameter {parameter_id}") - usages.remove_parameter(parameter_id) - - def _add_unused_api_elements(usages: UsageCountStore, api: API) -> None: """ Adds unused API elements to the UsageStore. 
When a class, function or parameter is not used, it is not content of @@ -54,20 +17,15 @@ def _add_unused_api_elements(usages: UsageCountStore, api: API) -> None: :param api: Description of the API """ - # Public classes for class_id in api.classes: - if api.is_public_class(class_id): - usages.add_class_usages(class_id, 0) + usages.add_class_usages(class_id, 0) - # Public functions for function in api.functions.values(): - if api.is_public_function(function.id): - usages.add_function_usages(function.id, 0) + usages.add_function_usages(function.id, 0) - # "Public" parameters - for parameter in function.parameters: - usages.init_value(parameter.id) - usages.add_parameter_usages(parameter.id, 0) + for parameter in function.parameters: + usages.add_parameter_usages(parameter.id, 0) + usages.init_value(parameter.id) def _add_implicit_usages_of_default_value(usages: UsageCountStore, api: API) -> None: diff --git a/package-parser/package_parser/processing/api/_ast_visitor.py b/package-parser/package_parser/processing/api/_ast_visitor.py index 0c94604e0..fb8e823be 100644 --- a/package-parser/package_parser/processing/api/_ast_visitor.py +++ b/package-parser/package_parser/processing/api/_ast_visitor.py @@ -15,7 +15,7 @@ ParameterAndResultDocstring, ParameterAssignment, ) -from package_parser.utils import parent_id +from package_parser.utils import parent_qualified_name from ._file_filters import _is_init_file @@ -315,7 +315,7 @@ def is_public(self, name: str, qualified_name: str) -> bool: return True # Containing class is re-exported (always false if the current API element is not a method) - if parent_id(qualified_name) in self.reexported: + if parent_qualified_name(qualified_name) in self.reexported: return True # The slicing is necessary so __init__ functions are not excluded (already handled in the first condition). diff --git a/package-parser/package_parser/utils/__init__.py b/package-parser/package_parser/utils/__init__.py index bd442ad38..466c9f1c3 100644 --- a/package-parser/package_parser/utils/__init__.py +++ b/package-parser/package_parser/utils/__init__.py @@ -1,4 +1,4 @@ from ._ASTWalker import ASTWalker from ._files import ensure_file_exists, initialize_and_read_exclude_file, list_files -from ._names import declaration_qname_to_name, parent_id +from ._names import declaration_qname_to_name, parent_id, parent_qualified_name from ._parsing import parse_python_code diff --git a/package-parser/package_parser/utils/_names.py b/package-parser/package_parser/utils/_names.py index 522aaf867..0b963391e 100644 --- a/package-parser/package_parser/utils/_names.py +++ b/package-parser/package_parser/utils/_names.py @@ -1,6 +1,10 @@ -def declaration_qname_to_name(qname: str) -> str: - return qname.split(".")[-1] +def declaration_qname_to_name(qualified_name: str) -> str: + return qualified_name.split(".")[-1] def parent_id(id_: str) -> str: return "/".join(id_.split("/")[:-1]) + + +def parent_qualified_name(qualified_name: str) -> str: + return ".".join(qualified_name.split(".")[:-1])
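For reference, the behaviour of the two helpers in package_parser/utils/_names.py after this change: both drop the last segment of their input, parent_id splitting element ids on "/" and the new parent_qualified_name splitting qualified names on ".".

    # Behaviour of the helpers defined in package_parser/utils/_names.py above.
    from package_parser.utils import parent_id, parent_qualified_name

    parent_id("sklearn/sklearn.utils._param_validation/StrOptions/is_satisfied_by/val")
    # -> "sklearn/sklearn.utils._param_validation/StrOptions/is_satisfied_by"

    parent_qualified_name("sklearn.utils._param_validation.StrOptions.is_satisfied_by")
    # -> "sklearn.utils._param_validation.StrOptions"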