
Commit d2eb7fe

Convert optimizer properties to local variables
1 parent 2e20635 commit d2eb7fe

8 files changed, +30 -74 lines changed

8 files changed

+30
-74
lines changed
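Every file in this commit follows the same refactoring: scalar constants (and a few graph variables) that used to be `private lateinit var` properties on the optimizer class, reassigned on each `applyGradients` call, become plain local `val`s inside that method. A minimal sketch of the before/after shape, using hypothetical names and a stand-in type instead of TensorFlow's `Constant<Float>`:

// Stand-in for org.tensorflow.op.core.Constant<Float>; purely illustrative.
class ScalarConst(val value: Float)

// Before: the constant is class-level mutable state, initialized late and
// overwritten every time applyGradients() runs.
class BeforeOptimizer(private val learningRate: Float) {
    private lateinit var learningRateConst: ScalarConst

    fun applyGradients(): ScalarConst {
        learningRateConst = ScalarConst(learningRate)
        return learningRateConst
    }
}

// After: the constant is a local val, visible only where it is actually used.
class AfterOptimizer(private val learningRate: Float) {
    fun applyGradients(): ScalarConst {
        val learningRateConst = ScalarConst(learningRate)
        return learningRateConst
    }
}

Judging from the diff, nothing outside `applyGradients` ever read these properties, so the change removes mutable state without altering the graph that gets built.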

api/src/main/kotlin/org/jetbrains/kotlinx/dl/api/core/optimizer/AdaDelta.kt

Lines changed: 3 additions & 7 deletions

@@ -9,7 +9,6 @@ import org.jetbrains.kotlinx.dl.api.core.KGraph
 import org.jetbrains.kotlinx.dl.api.core.util.getDType
 import org.tensorflow.Operand
 import org.tensorflow.op.Ops
-import org.tensorflow.op.core.Constant
 import org.tensorflow.op.core.Gradients
 import org.tensorflow.op.core.Variable
 import org.tensorflow.op.train.ApplyAdadelta
@@ -49,9 +48,6 @@ public class AdaDelta(
     private val epsilon: Float = 1e-8f,
     clipGradient: ClipGradientAction = NoClipGradient()
 ) : Optimizer(clipGradient) {
-    private lateinit var epsilonConstant: Constant<Float>
-    private lateinit var learningRateConst: Constant<Float>
-    private lateinit var rhoConst: Constant<Float>

     init {
         require(learningRate >= 0.0f) { "Learning rate $learningRate should be >= 0.0." }
@@ -67,9 +63,9 @@ public class AdaDelta(
     ): List<Operand<Float>> {
         val targets = mutableListOf<Operand<Float>>()

-        rhoConst = tf.constant(rho, getDType())
-        learningRateConst = tf.constant(learningRate, getDType())
-        epsilonConstant = tf.constant(epsilon, getDType())
+        val rhoConst = tf.constant(rho, getDType())
+        val learningRateConst = tf.constant(learningRate, getDType())
+        val epsilonConstant = tf.constant(epsilon, getDType())

         for ((i, variable) in weights.withIndex()) {
             val output = variable.asOutput()

api/src/main/kotlin/org/jetbrains/kotlinx/dl/api/core/optimizer/AdaGrad.kt

Lines changed: 1 addition & 5 deletions

@@ -9,7 +9,6 @@ import org.jetbrains.kotlinx.dl.api.core.KGraph
 import org.jetbrains.kotlinx.dl.api.core.util.getDType
 import org.tensorflow.Operand
 import org.tensorflow.op.Ops
-import org.tensorflow.op.core.Constant
 import org.tensorflow.op.core.Gradients
 import org.tensorflow.op.core.Variable
 import org.tensorflow.op.train.ApplyAdagrad
@@ -42,8 +41,6 @@ public class AdaGrad(
     private val initialAccumulatorValue: Float = 0.01f,
     clipGradient: ClipGradientAction = NoClipGradient()
 ) : Optimizer(clipGradient) {
-    private lateinit var initialAccumulatorValueConstant: Constant<Float>
-    private lateinit var learningRateConst: Constant<Float>

     init {
         require(learningRate >= 0.0f) { "Learning rate $learningRate should be >= 0.0." }
@@ -58,8 +55,7 @@ public class AdaGrad(
     ): List<Operand<Float>> {
         val targets = mutableListOf<Operand<Float>>()

-        initialAccumulatorValueConstant = tf.constant(initialAccumulatorValue, getDType())
-        learningRateConst = tf.constant(learningRate, getDType())
+        val learningRateConst = tf.constant(learningRate, getDType())

         for ((i, variable) in weights.withIndex()) {
             val slot = createSlot(ACCUMULATOR, variable.asOutput(), tf, graph, initialValue = initialAccumulatorValue)
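AdaGrad is the one case where a line disappears outright instead of becoming a local `val`: `initialAccumulatorValueConstant` was built but never consumed, because the accumulator slot is seeded straight from the raw `Float` via `createSlot(..., initialValue = initialAccumulatorValue)`. A tiny sketch of why the removed line was dead weight, using hypothetical stand-ins rather than the real TensorFlow graph API:

// Hypothetical graph that just records which nodes get created.
class FakeGraph {
    val nodes = mutableListOf<String>()
    fun constant(value: Float): String = "const($value)".also { nodes += it }
    fun slot(initialValue: Float): String = "slot($initialValue)".also { nodes += it }
}

fun buildBefore(g: FakeGraph, initialAccumulatorValue: Float): String {
    g.constant(initialAccumulatorValue) // node created, then never referenced again
    return g.slot(initialAccumulatorValue)
}

fun buildAfter(g: FakeGraph, initialAccumulatorValue: Float): String {
    return g.slot(initialAccumulatorValue) // same result, one node fewer
}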

api/src/main/kotlin/org/jetbrains/kotlinx/dl/api/core/optimizer/AdaGradDA.kt

Lines changed: 4 additions & 9 deletions

@@ -14,7 +14,6 @@ import org.tensorflow.Operand
 import org.tensorflow.Shape
 import org.tensorflow.op.Ops
 import org.tensorflow.op.core.Assign
-import org.tensorflow.op.core.Constant
 import org.tensorflow.op.core.Gradients
 import org.tensorflow.op.core.Variable
 import org.tensorflow.op.train.ApplyAdagradDa
@@ -51,10 +50,6 @@ public class AdaGradDA(
     private val l2Strength: Float = 0.01f,
     clipGradient: ClipGradientAction = NoClipGradient()
 ) : Optimizer(clipGradient) {
-    private lateinit var learningRateConst: Constant<Float>
-    private lateinit var l1StrengthConst: Constant<Float>
-    private lateinit var l2StrengthConst: Constant<Float>
-    private lateinit var globalStep: Variable<Float>

     init {
         require(learningRate >= 0.0f) { "Learning rate $learningRate should be >= 0.0." }
@@ -71,11 +66,11 @@ public class AdaGradDA(
     ): List<Operand<Float>> {
         val targets = mutableListOf<Operand<Float>>()

-        learningRateConst = tf.constant(learningRate, getDType())
-        l1StrengthConst = tf.constant(l1Strength, getDType())
-        l2StrengthConst = tf.constant(l2Strength, getDType())
+        val learningRateConst = tf.constant(learningRate, getDType())
+        val l1StrengthConst = tf.constant(l1Strength, getDType())
+        val l2StrengthConst = tf.constant(l2Strength, getDType())

-        globalStep = tf.withName(GLOBAL_STEP).variable(Shape.scalar(), getDType())
+        val globalStep = tf.withName(GLOBAL_STEP).variable(Shape.scalar(), getDType())
         val globalStepAssignName = defaultAssignOpName(GLOBAL_STEP)
         val globalStepInit: Assign<*> = tf.withName(globalStepAssignName)
             .assign(globalStep, tf.withName(defaultInitializerOpName(GLOBAL_STEP)).constant(0.0f))

api/src/main/kotlin/org/jetbrains/kotlinx/dl/api/core/optimizer/Adam.kt

Lines changed: 6 additions & 14 deletions

@@ -14,7 +14,6 @@ import org.tensorflow.Operand
 import org.tensorflow.Shape
 import org.tensorflow.op.Ops
 import org.tensorflow.op.core.Assign
-import org.tensorflow.op.core.Constant
 import org.tensorflow.op.core.Gradients
 import org.tensorflow.op.core.Variable
 import org.tensorflow.op.train.ApplyAdam
@@ -52,13 +51,6 @@ public class Adam(
     clipGradient: ClipGradientAction = NoClipGradient()
 ) : Optimizer(clipGradient) {

-    private lateinit var epsilonConstant: Constant<Float>
-    private lateinit var learningRateConst: Constant<Float>
-    private lateinit var betaOneConst: Constant<Float>
-    private lateinit var betaTwoConst: Constant<Float>
-    private lateinit var betaOnePower: Variable<Float>
-    private lateinit var betaTwoPower: Variable<Float>
-
     init {
         require(learningRate >= 0.0f) { "Learning rate $learningRate should be >= 0.0." }
         require(beta1 > 0.0f && beta1 < 1.0f) { "Beta1 $beta1 should be in range (0.0; 1.0)." }
@@ -74,12 +66,12 @@ public class Adam(
     ): List<Operand<Float>> {
         val targets = mutableListOf<Operand<Float>>()

-        betaOneConst = tf.constant(beta1, getDType())
-        betaTwoConst = tf.constant(beta2, getDType())
-        learningRateConst = tf.constant(learningRate, getDType())
-        epsilonConstant = tf.constant(epsilon, getDType())
+        val betaOneConst = tf.constant(beta1, getDType())
+        val betaTwoConst = tf.constant(beta2, getDType())
+        val learningRateConst = tf.constant(learningRate, getDType())
+        val epsilonConstant = tf.constant(epsilon, getDType())

-        betaOnePower = tf.withName(FIRST_BETA_POWER_NAME).variable(Shape.scalar(), getDType())
+        val betaOnePower = tf.withName(FIRST_BETA_POWER_NAME).variable(Shape.scalar(), getDType())
         val betaOnePowerAssignName = defaultAssignOpName(FIRST_BETA_POWER_NAME)
         val betaOnePowerInit: Assign<*> = tf.withName(betaOnePowerAssignName)
             .assign(
@@ -88,7 +80,7 @@ public class Adam(
             )
         graph.addOptimizerVariableInitializer(betaOnePowerInit)

-        betaTwoPower = tf.withName(SECOND_BETA_POWER_NAME).variable(Shape.scalar(), getDType())
+        val betaTwoPower = tf.withName(SECOND_BETA_POWER_NAME).variable(Shape.scalar(), getDType())
         val betaTwoPowerAssignName = defaultAssignOpName(SECOND_BETA_POWER_NAME)
         val betaTwoPowerInit: Assign<*> = tf.withName(betaTwoPowerAssignName)
             .assign(
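In Adam (and likewise `globalStep` in AdaGradDA and `betaOnePower` in Adamax), the change also covers TensorFlow graph variables, not just constants. Making `betaOnePower`/`betaTwoPower` local works because the optimizer never needs a lasting Kotlin reference to them: the node keeps living inside the graph, its initializer is registered through `graph.addOptimizerVariableInitializer(...)`, and the update ops that read it are built in the same `applyGradients` call. A rough sketch of that lifetime split, with hypothetical stand-in types instead of the KotlinDL/TensorFlow ones:

// Hypothetical stand-ins: a graph node and a graph that tracks initializers.
class GraphNode(val name: String)

class FakeKGraph {
    val initializers = mutableListOf<GraphNode>()
    fun addOptimizerVariableInitializer(init: GraphNode) { initializers += init }
}

fun applyGradientsSketch(graph: FakeKGraph): List<GraphNode> {
    // Local val: the Kotlin reference goes out of scope when this method returns,
    // but the node it points to keeps existing inside the graph.
    val betaOnePower = GraphNode("beta1_power")
    graph.addOptimizerVariableInitializer(GraphNode("assign/${betaOnePower.name}"))

    // Update ops built right here are the only consumers of betaOnePower.
    val updateOp = GraphNode("apply_adam(${betaOnePower.name})")
    return listOf(updateOp)
}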

api/src/main/kotlin/org/jetbrains/kotlinx/dl/api/core/optimizer/Adamax.kt

Lines changed: 5 additions & 12 deletions

@@ -15,7 +15,6 @@ import org.tensorflow.Shape
 import org.tensorflow.op.Ops
 import org.tensorflow.op.Scope
 import org.tensorflow.op.core.Assign
-import org.tensorflow.op.core.Constant
 import org.tensorflow.op.core.Gradients
 import org.tensorflow.op.core.Variable
 import org.tensorflow.op.train.ApplyAdaMax
@@ -52,12 +51,6 @@ public class Adamax(
     clipGradient: ClipGradientAction = NoClipGradient()
 ) : Optimizer(clipGradient) {

-    private lateinit var epsilonConstant: Constant<Float>
-    private lateinit var learningRateConst: Constant<Float>
-    private lateinit var betaOneConst: Constant<Float>
-    private lateinit var betaTwoConst: Constant<Float>
-    private lateinit var betaOnePower: Variable<Float>
-
     init {
         require(learningRate >= 0.0f) { "Learning rate $learningRate should be >= 0.0." }
         require(beta1 > 0.0f && beta1 < 1.0f) { "Beta1 $beta1 should be in range (0.0; 1.0)." }
@@ -73,12 +66,12 @@ public class Adamax(
     ): List<Operand<Float>> {
         val targets = mutableListOf<Operand<Float>>()

-        betaOneConst = tf.constant(beta1, getDType())
-        betaTwoConst = tf.constant(beta2, getDType())
-        learningRateConst = tf.constant(learningRate, getDType())
-        epsilonConstant = tf.constant(epsilon, getDType())
+        val betaOneConst = tf.constant(beta1, getDType())
+        val betaTwoConst = tf.constant(beta2, getDType())
+        val learningRateConst = tf.constant(learningRate, getDType())
+        val epsilonConstant = tf.constant(epsilon, getDType())

-        betaOnePower = tf.withName(FIRST_BETA_POWER_NAME).variable(Shape.scalar(), getDType())
+        val betaOnePower = tf.withName(FIRST_BETA_POWER_NAME).variable(Shape.scalar(), getDType())
         val betaOnePowerAssignName = defaultAssignOpName(FIRST_BETA_POWER_NAME)
         val betaOnePowerInit: Assign<*> = tf.withName(betaOnePowerAssignName)
             .assign(

api/src/main/kotlin/org/jetbrains/kotlinx/dl/api/core/optimizer/Ftrl.kt

Lines changed: 5 additions & 12 deletions

@@ -9,7 +9,6 @@ import org.jetbrains.kotlinx.dl.api.core.KGraph
 import org.jetbrains.kotlinx.dl.api.core.util.getDType
 import org.tensorflow.Operand
 import org.tensorflow.op.Ops
-import org.tensorflow.op.core.Constant
 import org.tensorflow.op.core.Gradients
 import org.tensorflow.op.core.Variable
 import org.tensorflow.op.train.ApplyFtrl
@@ -58,12 +57,6 @@ public class Ftrl(
     private var initialAccumulatorValue: Float = 0.0f,
     clipGradient: ClipGradientAction = NoClipGradient()
 ) : Optimizer(clipGradient) {
-    /** */
-    private lateinit var learningRatePowerConst: Constant<Float>
-    private lateinit var learningRateConst: Constant<Float>
-    private lateinit var l1RegularizationStrengthConst: Constant<Float>
-    private lateinit var l2RegularizationStrengthConst: Constant<Float>
-    private lateinit var l2ShrinkageRegularizationStrengthConst: Constant<Float>

     init {
         require(learningRate >= 0.0f) { "Learning rate $learningRate should be >= 0.0." }
@@ -82,11 +75,11 @@ public class Ftrl(
     ): List<Operand<Float>> {
         val targets = mutableListOf<Operand<Float>>()

-        l1RegularizationStrengthConst = tf.constant(l1RegularizationStrength, getDType())
-        l2RegularizationStrengthConst = tf.constant(l2RegularizationStrength, getDType())
-        learningRateConst = tf.constant(learningRate, getDType())
-        l2ShrinkageRegularizationStrengthConst = tf.constant(l2ShrinkageRegularizationStrength, getDType())
-        learningRatePowerConst = tf.constant(learningRatePower, getDType())
+        val l1RegularizationStrengthConst = tf.constant(l1RegularizationStrength, getDType())
+        val l2RegularizationStrengthConst = tf.constant(l2RegularizationStrength, getDType())
+        val learningRateConst = tf.constant(learningRate, getDType())
+        val l2ShrinkageRegularizationStrengthConst = tf.constant(l2ShrinkageRegularizationStrength, getDType())
+        val learningRatePowerConst = tf.constant(learningRatePower, getDType())

         for ((i, variable) in weights.withIndex()) {
             val output = variable.asOutput()

api/src/main/kotlin/org/jetbrains/kotlinx/dl/api/core/optimizer/Momentum.kt

Lines changed: 2 additions & 5 deletions

@@ -8,7 +8,6 @@ package org.jetbrains.kotlinx.dl.api.core.optimizer
 import org.jetbrains.kotlinx.dl.api.core.KGraph
 import org.tensorflow.Operand
 import org.tensorflow.op.Ops
-import org.tensorflow.op.core.Constant
 import org.tensorflow.op.core.Gradients
 import org.tensorflow.op.core.Variable
 import org.tensorflow.op.train.ApplyMomentum
@@ -28,8 +27,6 @@ public class Momentum(
     private val useNesterov: Boolean = true,
     clipGradient: ClipGradientAction = NoClipGradient()
 ) : Optimizer(clipGradient) {
-    private lateinit var momentumConst: Constant<Float>
-    private lateinit var learningRateConst: Constant<Float>

     init {
         require(learningRate >= 0.0f) { "Learning rate $learningRate should be >= 0.0." }
@@ -44,8 +41,8 @@ public class Momentum(
     ): List<Operand<Float>> {
         val targets = mutableListOf<Operand<Float>>()

-        learningRateConst = tf.constant(learningRate)
-        momentumConst = tf.constant(momentum)
+        val learningRateConst = tf.constant(learningRate)
+        val momentumConst = tf.constant(momentum)

         for ((i, variable) in weights.withIndex()) {
             val slot = createSlot(MOMENTUM, variable.asOutput(), tf, graph)

api/src/main/kotlin/org/jetbrains/kotlinx/dl/api/core/optimizer/RMSProp.kt

Lines changed: 4 additions & 10 deletions

@@ -9,7 +9,6 @@ import org.jetbrains.kotlinx.dl.api.core.KGraph
 import org.jetbrains.kotlinx.dl.api.core.util.getDType
 import org.tensorflow.Operand
 import org.tensorflow.op.Ops
-import org.tensorflow.op.core.Constant
 import org.tensorflow.op.core.Gradients
 import org.tensorflow.op.core.Variable
 import org.tensorflow.op.train.ApplyCenteredRmsProp
@@ -37,11 +36,6 @@ public class RMSProp(
     clipGradient: ClipGradientAction = NoClipGradient()
 ) : Optimizer(clipGradient) {

-    private lateinit var epsilonConstant: Constant<Float>
-    private lateinit var learningRateConst: Constant<Float>
-    private lateinit var decayConst: Constant<Float>
-    private lateinit var momentumConst: Constant<Float>
-
     init {
         require(learningRate >= 0.0f) { "Learning rate $learningRate should be >= 0.0." }
         require(momentum >= 0.0f) { "Momentum $momentum should be >= 0.0." }
@@ -57,10 +51,10 @@ public class RMSProp(
     ): List<Operand<Float>> {
         val targets = mutableListOf<Operand<Float>>()

-        decayConst = tf.constant(decay, getDType())
-        momentumConst = tf.constant(momentum, getDType())
-        learningRateConst = tf.constant(learningRate, getDType())
-        epsilonConstant = tf.constant(epsilon, getDType())
+        val decayConst = tf.constant(decay, getDType())
+        val momentumConst = tf.constant(momentum, getDType())
+        val learningRateConst = tf.constant(learningRate, getDType())
+        val epsilonConstant = tf.constant(epsilon, getDType())

         for ((i, variable) in weights.withIndex()) {
             val output = variable.asOutput()
