Commit 40b637d

Fix error after upgrading genjavadoc to 0.14
1 parent f146853 commit 40b637d

10 files changed: 29 additions, 28 deletions
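Every hunk in this commit applies the same recipe: a Scaladoc `[[...]]` link whose target the generated Javadoc cannot resolve is demoted to a backtick code span (or, in the `@throws` case, replaced with a fully qualified class name). The sketch below illustrates the presumed mechanism; the names are hypothetical stand-ins, and the translation rules described in the comments are an assumption about how genjavadoc 0.14 behaves, not something stated in the commit.

// Hypothetical before/after pair, not code from this commit. The assumption:
// genjavadoc translates a Scaladoc wiki link [[X]] to Javadoc {@link X},
// which javadoc must resolve in the generated Java source, while a backtick
// span `X` becomes {@code X}, which javadoc never tries to resolve.
class NoSuchTableException(name: String) extends Exception(name)

trait TableLookupBefore {
  /** Returns the specified table, or throws [[NoSuchTableException]]. */
  def getTable(name: String): AnyRef
}

trait TableLookupAfter {
  /** Returns the specified table, or throws `NoSuchTableException`. */
  def getTable(name: String): AnyRef
}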

core/src/main/scala/org/apache/spark/rpc/RpcCallContext.scala

Lines changed: 1 addition & 1 deletion
@@ -24,7 +24,7 @@ package org.apache.spark.rpc
 private[spark] trait RpcCallContext {
 
   /**
-   * Reply a message to the sender. If the sender is [[RpcEndpoint]], its [[RpcEndpoint.receive]]
+   * Reply a message to the sender. If the sender is [[RpcEndpoint]], its `RpcEndpoint.receive`
    * will be called.
    */
   def reply(response: Any): Unit

core/src/main/scala/org/apache/spark/status/api/v1/ApiRootResource.scala

Lines changed: 3 additions & 3 deletions
@@ -77,16 +77,16 @@ private[spark] trait UIRoot {
   /**
    * Runs some code with the current SparkUI instance for the app / attempt.
    *
-   * @throws NoSuchElementException If the app / attempt pair does not exist.
+   * @throws java.util.NoSuchElementException If the app / attempt pair does not exist.
    */
   def withSparkUI[T](appId: String, attemptId: Option[String])(fn: SparkUI => T): T
 
   def getApplicationInfoList: Iterator[ApplicationInfo]
   def getApplicationInfo(appId: String): Option[ApplicationInfo]
 
   /**
-   * Write the event logs for the given app to the [[ZipOutputStream]] instance. If attemptId is
-   * [[None]], event logs for all attempts of this application will be written out.
+   * Write the event logs for the given app to the `ZipOutputStream` instance. If attemptId is
+   * `None`, event logs for all attempts of this application will be written out.
    */
   def writeEventLogs(appId: String, attemptId: Option[String], zipStream: ZipOutputStream): Unit = {
     Response.serverError()
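The `@throws` hunk above is handled differently from the plain links: a `@throws` tag carries a class reference rather than prose, so the commit fully qualifies the exception instead of demoting it to a code span. A minimal sketch of the same pattern with a hypothetical trait, assuming genjavadoc copies the exception name into the generated Java verbatim:

// Hypothetical trait mirroring the withSparkUI fix. NoSuchElementException
// lives in java.util (not java.lang), so an unqualified reference cannot be
// resolved in generated Java without an import; fully qualifying it in the
// @throws tag sidesteps the lookup entirely.
trait AppRegistry {
  /**
   * Runs `fn` against the UI of the given app / attempt.
   *
   * @throws java.util.NoSuchElementException if the app / attempt pair does not exist.
   */
  def withUI[T](appId: String, attemptId: Option[String])(fn: String => T): T
}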

core/src/main/scala/org/apache/spark/util/SizeEstimator.scala

Lines changed: 1 addition & 1 deletion
@@ -33,7 +33,7 @@ import org.apache.spark.util.collection.OpenHashSet
 /**
  * A trait that allows a class to give [[SizeEstimator]] more accurate size estimation.
  * When a class extends it, [[SizeEstimator]] will query the `estimatedSize` first.
- * If `estimatedSize` does not return [[None]], [[SizeEstimator]] will use the returned size
+ * If `estimatedSize` does not return `None`, [[SizeEstimator]] will use the returned size
  * as the size of the object. Otherwise, [[SizeEstimator]] will do the estimation work.
  * The difference between a [[KnownSizeEstimation]] and
  * [[org.apache.spark.util.collection.SizeTracker]] is that, a

external/kinesis-asl/src/main/scala/org/apache/spark/streaming/kinesis/SparkAWSCredentials.scala

Lines changed: 2 additions & 2 deletions
@@ -102,8 +102,8 @@ object SparkAWSCredentials {
    *
    * @note The given AWS keypair will be saved in DStream checkpoints if checkpointing is
    *       enabled. Make sure that your checkpoint directory is secure. Prefer using the
-   *       [[http://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/credentials.html#credentials-default default provider chain]]
-   *       instead if possible.
+   *       default provider chain instead if possible
+   *       (http://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/credentials.html#credentials-default).
    *
    * @param accessKeyId AWS access key ID
    * @param secretKey AWS secret key

mllib/src/main/scala/org/apache/spark/ml/ann/Layer.scala

Lines changed: 3 additions & 3 deletions
@@ -371,15 +371,15 @@ private[ann] trait TopologyModel extends Serializable {
   def forward(data: BDM[Double], includeLastLayer: Boolean): Array[BDM[Double]]
 
   /**
-   * Prediction of the model. See {@link ProbabilisticClassificationModel}
+   * Prediction of the model. See `ProbabilisticClassificationModel`
    *
    * @param features input features
    * @return prediction
    */
   def predict(features: Vector): Vector
 
   /**
-   * Raw prediction of the model. See {@link ProbabilisticClassificationModel}
+   * Raw prediction of the model. See `ProbabilisticClassificationModel`
    *
    * @param features input features
    * @return raw prediction
@@ -389,7 +389,7 @@ private[ann] trait TopologyModel extends Serializable {
   def predictRaw(features: Vector): Vector
 
   /**
-   * Probability of the model. See {@link ProbabilisticClassificationModel}
+   * Probability of the model. See `ProbabilisticClassificationModel`
    *
    * @param rawPrediction raw prediction vector
    * @return probability

mllib/src/main/scala/org/apache/spark/ml/attribute/attributes.scala

Lines changed: 1 addition & 1 deletion
@@ -121,7 +121,7 @@ sealed abstract class Attribute extends Serializable {
 private[attribute] trait AttributeFactory {
 
   /**
-   * Creates an [[Attribute]] from a [[Metadata]] instance.
+   * Creates an [[Attribute]] from a `Metadata` instance.
    */
   private[attribute] def fromMetadata(metadata: Metadata): Attribute
 

mllib/src/main/scala/org/apache/spark/ml/stat/Correlation.scala

Lines changed: 1 addition & 1 deletion
@@ -49,7 +49,7 @@ object Correlation {
    *               Supported: `pearson` (default), `spearman`
    * @return A dataframe that contains the correlation matrix of the column of vectors. This
    *         dataframe contains a single row and a single column of name
-   *         '$METHODNAME($COLUMN)'.
+   *         `$METHODNAME($COLUMN)`.
    * @throws IllegalArgumentException if the column is not a valid column in the dataset, or if
    *         the content of this column is not of type Vector.
    *
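For reference, the column name documented above is what `Correlation.corr` actually produces. A small usage sketch, assuming an existing local SparkSession bound to a stable `spark` value:

import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.ml.stat.Correlation

// Assumes `spark` is an existing SparkSession (e.g. from spark-shell).
import spark.implicits._

val df = Seq(
  Vectors.dense(1.0, 2.0),
  Vectors.dense(2.0, 4.1),
  Vectors.dense(3.0, 6.2)
).map(Tuple1.apply).toDF("features")

// The default method is "pearson"; the result has a single row and a single
// column named "pearson(features)", matching the `$METHODNAME($COLUMN)`
// convention in the doc comment above.
val result = Correlation.corr(df, "features")
result.printSchema()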

mllib/src/main/scala/org/apache/spark/ml/tree/treeParams.scala

Lines changed: 13 additions & 12 deletions
@@ -40,43 +40,43 @@ private[ml] trait DecisionTreeParams extends PredictorParams
   with HasCheckpointInterval with HasSeed {
 
   /**
-   * Maximum depth of the tree (>= 0).
+   * Maximum depth of the tree (nonnegative).
    * E.g., depth 0 means 1 leaf node; depth 1 means 1 internal node + 2 leaf nodes.
    * (default = 5)
    * @group param
    */
   final val maxDepth: IntParam =
-    new IntParam(this, "maxDepth", "Maximum depth of the tree. (>= 0)" +
+    new IntParam(this, "maxDepth", "Maximum depth of the tree. (Nonnegative)" +
       " E.g., depth 0 means 1 leaf node; depth 1 means 1 internal node + 2 leaf nodes.",
       ParamValidators.gtEq(0))
 
   /**
    * Maximum number of bins used for discretizing continuous features and for choosing how to split
    * on features at each node. More bins give higher granularity.
-   * Must be >= 2 and >= number of categories in any categorical feature.
+   * Must be at least 2 and at least number of categories in any categorical feature.
    * (default = 32)
    * @group param
    */
   final val maxBins: IntParam = new IntParam(this, "maxBins", "Max number of bins for" +
-    " discretizing continuous features. Must be >=2 and >= number of categories for any" +
-    " categorical feature.", ParamValidators.gtEq(2))
+    " discretizing continuous features. Must be at least 2 and at least number of categories" +
+    " for any categorical feature.", ParamValidators.gtEq(2))
 
   /**
    * Minimum number of instances each child must have after split.
    * If a split causes the left or right child to have fewer than minInstancesPerNode,
    * the split will be discarded as invalid.
-   * Should be >= 1.
+   * Must be at least 1.
    * (default = 1)
    * @group param
    */
   final val minInstancesPerNode: IntParam = new IntParam(this, "minInstancesPerNode", "Minimum" +
     " number of instances each child must have after split. If a split causes the left or right" +
     " child to have fewer than minInstancesPerNode, the split will be discarded as invalid." +
-    " Should be >= 1.", ParamValidators.gtEq(1))
+    " Must be at least 1.", ParamValidators.gtEq(1))
 
   /**
    * Minimum information gain for a split to be considered at a tree node.
-   * Should be >= 0.0.
+   * Should be at least 0.0.
    * (default = 0.0)
    * @group param
    */
@@ -372,7 +372,7 @@ private[ml] trait TreeEnsembleParams extends DecisionTreeParams {
    * Supported options:
    *  - "auto": Choose automatically for task:
    *            If numTrees == 1, set to "all."
-   *            If numTrees > 1 (forest), set to "sqrt" for classification and
+   *            If numTrees greater than 1 (forest), set to "sqrt" for classification and
    *            to "onethird" for regression.
    *  - "all": use all features
    *  - "onethird": use 1/3 of the features
@@ -424,8 +424,8 @@ private[ml] trait TreeEnsembleParams extends DecisionTreeParams {
 private[ml] trait RandomForestParams extends TreeEnsembleParams {
 
   /**
-   * Number of trees to train (>= 1).
-   * If 1, then no bootstrapping is used. If > 1, then bootstrapping is done.
+   * Number of trees to train (at least 1).
+   * If 1, then no bootstrapping is used. If greater than 1, then bootstrapping is done.
    * TODO: Change to always do bootstrapping (simpler). SPARK-7130
    * (default = 20)
    *
@@ -434,7 +434,8 @@ private[ml] trait RandomForestParams extends TreeEnsembleParams {
    * are a bit different.
    * @group param
    */
-  final val numTrees: IntParam = new IntParam(this, "numTrees", "Number of trees to train (>= 1)",
+  final val numTrees: IntParam =
+    new IntParam(this, "numTrees", "Number of trees to train (at least 1)",
     ParamValidators.gtEq(1))
 
   setDefault(numTrees -> 20)
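Unlike the other files, the edits here swap comparison operators for prose ("at least", "nonnegative"), presumably because raw `>` / `>=` characters are fragile once the Scaladoc is converted to Javadoc HTML; the runtime validation is untouched. A sketch of the convention with a hypothetical parameter (the trait and parameter names are invented for illustration):

import org.apache.spark.ml.param.{IntParam, ParamValidators, Params}

// Hypothetical params trait following the same convention: the doc text
// avoids ">=", while ParamValidators.gtEq still enforces the bound at runtime.
trait MyTreeParams extends Params {

  /**
   * Maximum number of leaf nodes (at least 2).
   * (default = 31)
   * @group param
   */
  final val maxLeaves: IntParam =
    new IntParam(this, "maxLeaves", "Maximum number of leaf nodes (at least 2)",
      ParamValidators.gtEq(2))

  setDefault(maxLeaves -> 31)

  /** @group getParam */
  final def getMaxLeaves: Int = $(maxLeaves)
}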

mllib/src/main/scala/org/apache/spark/mllib/stat/test/StreamingTestMethod.scala

Lines changed: 1 addition & 1 deletion
@@ -33,7 +33,7 @@ import org.apache.spark.util.StatCounter
 /**
  * Significance testing methods for [[StreamingTest]]. New 2-sample statistical significance tests
  * should extend [[StreamingTestMethod]] and introduce a new entry in
- * [[StreamingTestMethod.TEST_NAME_TO_OBJECT]]
+ * `StreamingTestMethod.TEST_NAME_TO_OBJECT`
  */
 private[stat] sealed trait StreamingTestMethod extends Serializable {
 
sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClient.scala

Lines changed: 3 additions & 3 deletions
@@ -41,7 +41,7 @@ private[hive] trait HiveClient {
 
   /**
    * Return the associated Hive SessionState of this [[HiveClientImpl]]
-   * @return [[Any]] not SessionState to avoid linkage error
+   * @return `Any` not SessionState to avoid linkage error
    */
   def getState: Any
 
@@ -76,7 +76,7 @@ private[hive] trait HiveClient {
   /** Return whether a table/view with the specified name exists. */
   def tableExists(dbName: String, tableName: String): Boolean
 
-  /** Returns the specified table, or throws [[NoSuchTableException]]. */
+  /** Returns the specified table, or throws `NoSuchTableException`. */
   final def getTable(dbName: String, tableName: String): CatalogTable = {
     getTableOption(dbName, tableName).getOrElse(throw new NoSuchTableException(dbName, tableName))
   }
@@ -166,7 +166,7 @@ private[hive] trait HiveClient {
       table: String,
       newParts: Seq[CatalogTablePartition]): Unit
 
-  /** Returns the specified partition, or throws [[NoSuchPartitionException]]. */
+  /** Returns the specified partition, or throws `NoSuchPartitionException`. */
   final def getPartition(
       dbName: String,
       tableName: String,
