Commit 065cdea

Revert "Squashed commit master (merge) to resolve conflicts"
This reverts commit 0af059c.
1 parent 0af059c · commit 065cdea


143 files changed (+1162, -2155 lines)

.github/bot_config.yml

Lines changed: 1 addition & 0 deletions
@@ -16,3 +16,4 @@
 # A list of assignees
 assignees:
 - tilakrayal
+- sushreebarsa

keras/activations.py

Lines changed: 44 additions & 30 deletions
@@ -63,12 +63,12 @@ def softmax(x, axis=-1):
     The input values in are the log-odds of the resulting probability.
 
     Args:
-        x : Input tensor.
-        axis: Integer, axis along which the softmax normalization is applied.
+      x : Input tensor.
+      axis: Integer, axis along which the softmax normalization is applied.
 
     Returns:
-        Tensor, output of softmax transformation (all values are non-negative
-        and sum to 1).
+      Tensor, output of softmax transformation (all values are non-negative
+      and sum to 1).
 
     Examples:
@@ -84,7 +84,22 @@ def softmax(x, axis=-1):
     >>> layer = tf.keras.layers.Dense(32,
     ...                               activation=tf.keras.activations.softmax)
     """
-    return backend.softmax(x, axis)
+    if x.shape.rank <= 1:
+        raise ValueError(
+            f"Cannot apply softmax to a tensor that is 1D. Received input: {x}"
+        )
+
+    if isinstance(axis, int):
+        output = tf.nn.softmax(x, axis=axis)
+    else:
+        # nn.softmax does not support tuple axis.
+        numerator = tf.exp(x - tf.reduce_max(x, axis=axis, keepdims=True))
+        denominator = tf.reduce_sum(numerator, axis=axis, keepdims=True)
+        output = numerator / denominator
+
+    # Cache the logits to use for crossentropy loss.
+    output._keras_logits = x
+    return output
 
 
 @keras_export("keras.activations.elu")
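Note on the restored softmax body: it falls back to a manual reduction because tf.nn.softmax only accepts a single integer axis. A minimal standalone sketch of that same fallback (the shape and tuple axis below are illustrative assumptions, not from the commit):

import tensorflow as tf

# Hypothetical input: batch of 2, normalized jointly over the last two axes.
x = tf.random.normal((2, 3, 4))
axis = (1, 2)  # tuple axis, which tf.nn.softmax does not accept

# Subtract the per-slice max for numerical stability, then normalize.
numerator = tf.exp(x - tf.reduce_max(x, axis=axis, keepdims=True))
output = numerator / tf.reduce_sum(numerator, axis=axis, keepdims=True)

print(tf.reduce_sum(output, axis=axis).numpy())  # each slice sums to ~1.0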
@@ -123,11 +138,11 @@ def elu(x, alpha=1.0):
     Args:
         x: Input tensor.
         alpha: A scalar, slope of negative section. `alpha` controls the value
-            to which an ELU saturates for negative net inputs.
+          to which an ELU saturates for negative net inputs.
 
     Returns:
         The exponential linear unit (ELU) activation function: `x` if `x > 0`
-        and `alpha * (exp(x) - 1)` if `x < 0`.
+          and `alpha * (exp(x) - 1)` if `x < 0`.
 
 
     Reference:
@@ -181,9 +196,9 @@ def selu(x):
 
     Notes:
         - To be used together with the
-            `tf.keras.initializers.LecunNormal` initializer.
+          `tf.keras.initializers.LecunNormal` initializer.
         - To be used together with the dropout variant
-            `tf.keras.layers.AlphaDropout` (not regular dropout).
+          `tf.keras.layers.AlphaDropout` (not regular dropout).
 
     References:
         - [Klambauer et al., 2017](https://arxiv.org/abs/1706.02515)
@@ -260,7 +275,7 @@ def swish(x):
         The swish activation applied to `x` (see reference paper for details).
 
     Reference:
-        - [Ramachandran et al., 2017](https://arxiv.org/abs/1710.05941)
+      - [Ramachandran et al., 2017](https://arxiv.org/abs/1710.05941)
     """
     return tf.nn.silu(x)

@@ -292,16 +307,16 @@ def relu(x, alpha=0.0, max_value=None, threshold=0.0):
     Args:
         x: Input `tensor` or `variable`.
         alpha: A `float` that governs the slope for values lower than the
-            threshold.
+          threshold.
         max_value: A `float` that sets the saturation threshold (the largest
-            value the function will return).
+          value the function will return).
         threshold: A `float` giving the threshold value of the activation
-            function below which values will be damped or set to zero.
+          function below which values will be damped or set to zero.
 
     Returns:
-        A `Tensor` representing the input tensor, transformed by the relu
-        activation function. Tensor will be of the same shape and dtype of
-        input `x`.
+      A `Tensor` representing the input tensor,
+      transformed by the relu activation function.
+      Tensor will be of the same shape and dtype of input `x`.
     """
     return backend.relu(
         x, alpha=alpha, max_value=max_value, threshold=threshold
@@ -343,8 +358,8 @@ def gelu(x, approximate=False):
     if `approximate` is `False`.
 
     Reference:
-        - [Gaussian Error Linear Units (GELUs)](https://arxiv.org/abs/1606.08415)
-    """  # noqa: E501
+      - [Gaussian Error Linear Units (GELUs)](https://arxiv.org/abs/1606.08415)
+    """
     return tf.nn.gelu(x, approximate)

@@ -397,7 +412,10 @@ def sigmoid(x):
     Returns:
         Tensor with the sigmoid activation: `1 / (1 + exp(-x))`.
     """
-    return backend.sigmoid(x)
+    output = tf.sigmoid(x)
+    # Cache the logits to use for crossentropy loss.
+    output._keras_logits = x
+    return output
 
 
 @keras_export("keras.activations.exponential")
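Like the softmax change above, the restored sigmoid caches the pre-activation tensor on its output (output._keras_logits = x) so crossentropy losses can recover the raw logits. A rough sketch of the numerical motivation in plain TensorFlow (the example values are made up):

import tensorflow as tf

logits = tf.constant([[20.0, -1.0, 0.5]])
labels = tf.constant([[1.0, 0.0, 1.0]])

# Naive BCE from probabilities goes through log(sigmoid(x)), which loses
# precision once sigmoid saturates at large |x|.
probs = tf.sigmoid(logits)
naive = -(labels * tf.math.log(probs) + (1.0 - labels) * tf.math.log(1.0 - probs))

# The fused op works directly on the logits and stays numerically stable,
# which is what recovering the cached logits enables.
stable = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits)
print(naive.numpy(), stable.numpy())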
@@ -441,11 +459,11 @@ def hard_sigmoid(x):
         x: Input tensor.
 
     Returns:
-        The hard sigmoid activation, defined as:
+      The hard sigmoid activation, defined as:
 
-        - `if x < -2.5: return 0`
-        - `if x > 2.5: return 1`
-        - `if -2.5 <= x <= 2.5: return 0.2 * x + 0.5`
+      - `if x < -2.5: return 0`
+      - `if x > 2.5: return 1`
+      - `if -2.5 <= x <= 2.5: return 0.2 * x + 0.5`
     """
     return backend.hard_sigmoid(x)
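The piecewise definition in the docstring above is easy to sanity-check; a quick sketch via the public alias (input values chosen to hit all three branches):

import tensorflow as tf

x = tf.constant([-3.0, 0.0, 1.0, 3.0])
# -3.0 -> 0 (x < -2.5), 0.0 -> 0.5, 1.0 -> 0.2 * 1.0 + 0.5 = 0.7, 3.0 -> 1 (x > 2.5)
print(tf.keras.activations.hard_sigmoid(x).numpy())  # [0.  0.5 0.7 1. ]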

@@ -517,8 +535,6 @@ def serialize(activation, use_legacy_format=False):
 
     Args:
         activation : Function object.
-        use_legacy_format: Boolean, whether to use the legacy format for
-            serialization. Defaults to False.
 
     Returns:
         String denoting the name attribute of the input function
@@ -592,11 +608,9 @@ def deserialize(name, custom_objects=None, use_legacy_format=False):
     """Returns activation function given a string identifier.
 
     Args:
-        name: The name of the activation function.
-        custom_objects: Optional `{function_name: function_obj}`
-            dictionary listing user-provided activation functions.
-        use_legacy_format: Boolean, whether to use the legacy format for
-            deserialization. Defaults to False.
+      name: The name of the activation function.
+      custom_objects: Optional `{function_name: function_obj}`
+        dictionary listing user-provided activation functions.
 
     Returns:
         Corresponding activation function.
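The reverted docstrings drop the use_legacy_format descriptions, but the basic name round-trip is unchanged. A short usage sketch through the public tf.keras.activations aliases (output as observed in TF 2.x; an assumption, not from the commit):

import tensorflow as tf

# A built-in activation serializes to its string name...
name = tf.keras.activations.serialize(tf.keras.activations.relu)
print(name)  # 'relu'

# ...and the string identifier resolves back to the function.
fn = tf.keras.activations.deserialize("relu")
print(fn(tf.constant([-1.0, 2.0])).numpy())  # [0. 2.]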

keras/api/BUILD

Lines changed: 0 additions & 1 deletion
@@ -5,7 +5,6 @@ load("//keras/api:api_gen.bzl", "gen_api_init_files")
 load("//keras/api:api_init_files.bzl", "KERAS_API_INIT_FILES", "KERAS_API_INIT_FILES_V1")
 
 package(
-    # copybara:uncomment default_applicable_licenses = ["//keras:license"],
     default_visibility = [
         "//keras:friends",
         "//third_party/py/tensorflow:__subpackages__",

keras/api/api_gen.bzl

Lines changed: 1 addition & 1 deletion
@@ -119,7 +119,7 @@ def gen_api_init_files(
             _make_cmd(api_gen_binary_target, flags, loading = "default"),
         ),
         srcs = srcs,
-        tools = [":" + api_gen_binary_target],
+        exec_tools = [":" + api_gen_binary_target],
         visibility = ["//visibility:public"],
     )

keras/api/golden/BUILD

Lines changed: 0 additions & 1 deletion
@@ -1,7 +1,6 @@
 # TensorFlow API backwards compatibility test goldens.
 
 package(
-    # copybara:uncomment default_applicable_licenses = ["//keras:license"],
     default_visibility = ["//visibility:public"],
     licenses = ["notice"],  # Apache 2.0
 )

keras/api/golden/v1/tensorflow.keras.-model.pbtxt

Lines changed: 0 additions & 4 deletions
@@ -36,10 +36,6 @@ tf_class {
     name: "dynamic"
     mtype: "<type \'property\'>"
   }
-  member {
-    name: "enable_tune_steps_per_execution"
-    mtype: "<type \'property\'>"
-  }
   member {
     name: "inbound_nodes"
     mtype: "<type \'property\'>"

keras/api/golden/v1/tensorflow.keras.-sequential.pbtxt

Lines changed: 0 additions & 4 deletions
@@ -38,10 +38,6 @@ tf_class {
     name: "dynamic"
     mtype: "<type \'property\'>"
   }
-  member {
-    name: "enable_tune_steps_per_execution"
-    mtype: "<type \'property\'>"
-  }
   member {
     name: "inbound_nodes"
     mtype: "<type \'property\'>"

keras/api/golden/v1/tensorflow.keras.experimental.-linear-model.pbtxt

Lines changed: 0 additions & 4 deletions
@@ -37,10 +37,6 @@ tf_class {
     name: "dynamic"
     mtype: "<type \'property\'>"
   }
-  member {
-    name: "enable_tune_steps_per_execution"
-    mtype: "<type \'property\'>"
-  }
   member {
     name: "inbound_nodes"
     mtype: "<type \'property\'>"

keras/api/golden/v1/tensorflow.keras.experimental.-wide-deep-model.pbtxt

Lines changed: 0 additions & 4 deletions
@@ -37,10 +37,6 @@ tf_class {
     name: "dynamic"
     mtype: "<type \'property\'>"
   }
-  member {
-    name: "enable_tune_steps_per_execution"
-    mtype: "<type \'property\'>"
-  }
   member {
     name: "inbound_nodes"
     mtype: "<type \'property\'>"

keras/api/golden/v1/tensorflow.keras.models.-linear-model.pbtxt

Lines changed: 0 additions & 4 deletions
@@ -37,10 +37,6 @@ tf_class {
     name: "dynamic"
     mtype: "<type \'property\'>"
   }
-  member {
-    name: "enable_tune_steps_per_execution"
-    mtype: "<type \'property\'>"
-  }
   member {
     name: "inbound_nodes"
     mtype: "<type \'property\'>"
