From 7fc9edc5ea770e70da045dfa4c1358c4b39ccb86 Mon Sep 17 00:00:00 2001 From: Samuel Marks <807580+SamuelMarks@users.noreply.github.com> Date: Mon, 2 Jun 2025 13:17:31 -0600 Subject: [PATCH 1/5] [*.py] Use f-strings and `os.path.join` throughout --- api_gen.py | 9 ++- .../distributed_training_with_tensorflow.py | 4 +- guides/training_with_built_in_methods.py | 4 +- integration_tests/import_test.py | 13 ++-- integration_tests/model_visualization_test.py | 6 +- keras/src/applications/convnext.py | 36 +++++------ keras/src/applications/densenet.py | 39 ++++++----- keras/src/applications/efficientnet.py | 32 +++++----- keras/src/applications/efficientnet_v2.py | 56 ++++++++-------- keras/src/applications/inception_resnet_v2.py | 12 ++-- keras/src/applications/inception_v3.py | 4 +- keras/src/applications/mobilenet_v2.py | 33 ++++------ keras/src/applications/mobilenet_v3.py | 30 ++++----- keras/src/applications/nasnet.py | 14 ++-- keras/src/applications/resnet.py | 64 +++++++++---------- keras/src/applications/xception.py | 18 +++--- keras/src/backend/common/variables.py | 2 +- .../src/backend/jax/distribution_lib_test.py | 2 +- keras/src/backend/jax/export.py | 2 +- keras/src/callbacks/backup_and_restore.py | 6 +- keras/src/callbacks/model_checkpoint.py | 2 +- keras/src/callbacks/swap_ema_weights_test.py | 9 +-- keras/src/callbacks/tensorboard.py | 6 +- keras/src/datasets/boston_housing.py | 2 +- keras/src/datasets/california_housing.py | 2 +- keras/src/datasets/cifar100.py | 4 +- keras/src/datasets/imdb.py | 4 +- keras/src/datasets/mnist.py | 2 +- keras/src/datasets/reuters.py | 4 +- keras/src/export/tf2onnx_lib.py | 6 +- keras/src/legacy/backend.py | 28 ++------ keras/src/legacy/preprocessing/image.py | 32 +++------- keras/src/models/cloning_test.py | 4 +- keras/src/models/functional.py | 4 +- keras/src/models/model.py | 4 +- keras/src/ops/function.py | 2 +- keras/src/ops/numpy.py | 10 +-- keras/src/ops/operation.py | 5 +- keras/src/optimizers/base_optimizer.py | 11 ++-- keras/src/saving/file_editor.py | 38 +++++------ keras/src/saving/object_registration.py | 2 +- keras/src/saving/saving_lib.py | 6 +- keras/src/saving/serialization_lib.py | 4 +- keras/src/trainers/compile_utils.py | 2 +- keras/src/utils/file_utils.py | 17 +++-- keras/src/utils/file_utils_test.py | 33 +++++----- keras/src/utils/io_utils.py | 2 +- keras/src/utils/jax_layer_test.py | 4 +- keras/src/utils/model_visualization.py | 2 +- keras/src/utils/naming_test.py | 2 +- keras/src/utils/progbar.py | 8 +-- keras/src/utils/summary_utils.py | 17 ++++- 52 files changed, 318 insertions(+), 346 deletions(-) diff --git a/api_gen.py b/api_gen.py index af7abe71113a..89c0be2c67e0 100644 --- a/api_gen.py +++ b/api_gen.py @@ -84,9 +84,12 @@ def create_legacy_directory(package_dir): for fname in fnames: if fname.endswith(".py"): legacy_fpath = os.path.join(root, fname) - tf_keras_root = root.replace("/_legacy", "/_tf_keras/keras") + tf_keras_root = root.replace( + os.path.join(os.path.sep, "_legacy"), + os.path.join(os.path.sep, "_tf_keras", "keras") + ) core_api_fpath = os.path.join( - root.replace("/_legacy", ""), fname + root.replace(os.path.join(os.path.sep, "_legacy"), ""), fname ) if not os.path.exists(tf_keras_root): os.makedirs(tf_keras_root) @@ -125,7 +128,7 @@ def create_legacy_directory(package_dir): r"\n", core_api_contents, ) - legacy_contents = core_api_contents + "\n" + legacy_contents + legacy_contents = f"{core_api_contents}\n{legacy_contents}" with open(tf_keras_fpath, "w") as f: f.write(legacy_contents) diff --git 
a/guides/distributed_training_with_tensorflow.py b/guides/distributed_training_with_tensorflow.py
index 8ebbe1ee0236..7c0e9b556532 100644
--- a/guides/distributed_training_with_tensorflow.py
+++ b/guides/distributed_training_with_tensorflow.py
@@ -194,7 +194,8 @@ def make_or_restore_model():
     # Either restore the latest model, or create a fresh one
     # if there is no checkpoint available.
     checkpoints = [
-        checkpoint_dir + "/" + name for name in os.listdir(checkpoint_dir)
+        os.path.join(checkpoint_dir, name)
+        for name in os.listdir(checkpoint_dir)
     ]
     if checkpoints:
         latest_checkpoint = max(checkpoints, key=os.path.getctime)
@@ -216,7 +217,7 @@ def run_training(epochs=1):
         # This callback saves a SavedModel every epoch
         # We include the current epoch in the folder name.
         keras.callbacks.ModelCheckpoint(
-            filepath=checkpoint_dir + "/ckpt-{epoch}.keras",
+            filepath=os.path.join(checkpoint_dir, "ckpt-{epoch}.keras"),
             save_freq="epoch",
         )
     ]
diff --git a/guides/training_with_built_in_methods.py b/guides/training_with_built_in_methods.py
index b539d005815d..a3dea4b32d87 100644
--- a/guides/training_with_built_in_methods.py
+++ b/guides/training_with_built_in_methods.py
@@ -1133,7 +1133,8 @@ def make_or_restore_model():
     # Either restore the latest model, or create a fresh one
     # if there is no checkpoint available.
     checkpoints = [
-        checkpoint_dir + "/" + name for name in os.listdir(checkpoint_dir)
+        os.path.join(checkpoint_dir, name)
+        for name in os.listdir(checkpoint_dir)
     ]
     if checkpoints:
         latest_checkpoint = max(checkpoints, key=os.path.getctime)
@@ -1148,7 +1149,10 @@ def make_or_restore_model():
     # This callback saves the model every 100 batches.
     # We include the training loss in the saved model name.
     keras.callbacks.ModelCheckpoint(
-        filepath=checkpoint_dir + "/model-loss={loss:.2f}.keras", save_freq=100
+        filepath=os.path.join(
+            checkpoint_dir, "model-loss={loss:.2f}.keras"
+        ),
+        save_freq=100,
     )
 ]
 model.fit(x_train, y_train, epochs=1, callbacks=callbacks)
diff --git a/integration_tests/import_test.py b/integration_tests/import_test.py
index e7af37f23c83..a27f0b1de5e5 100644
--- a/integration_tests/import_test.py
+++ b/integration_tests/import_test.py
@@ -52,18 +52,15 @@ def manage_venv_installs(whl_path):
     backend_pkg, backend_extra_url = BACKEND_REQ[backend.backend()]
     install_setup = [
         # Installs the backend's package and common requirements
-        "pip install " + backend_extra_url + backend_pkg,
+        f"pip install {backend_extra_url}{backend_pkg}",
         "pip install -r requirements-common.txt",
         "pip install pytest",
         # Ensure other backends are uninstalled
-        "pip uninstall -y "
-        + BACKEND_REQ[other_backends[0]][0]
-        + " "
-        + BACKEND_REQ[other_backends[1]][0]
-        + " "
-        + BACKEND_REQ[other_backends[2]][0],
+        f"pip uninstall -y {BACKEND_REQ[other_backends[0]][0]} "
+        f"{BACKEND_REQ[other_backends[1]][0]} "
+        f"{BACKEND_REQ[other_backends[2]][0]}",
         # Install `.whl` package
-        "pip install " + whl_path,
+        f"pip install {whl_path}",
     ]
     run_commands_venv(install_setup)
diff --git a/integration_tests/model_visualization_test.py b/integration_tests/model_visualization_test.py
index df718274955f..965734958fe0 100644
--- a/integration_tests/model_visualization_test.py
+++ b/integration_tests/model_visualization_test.py
@@ -44,7 +44,7 @@ def get_node_dict(graph, path=""):
 
     for subgraph in graph.get_subgraphs():
         sub_nodes = get_node_dict(
-            subgraph, path=path + subgraph.get_label() + " > "
+            subgraph, path=f"{path}{subgraph.get_label()} > "
         )
         nodes.update(sub_nodes)
 
@@ -85,7 +85,7 @@ def get_edges(graph):
 class 
ModelVisualizationTest(testing.TestCase): def multi_plot_model(self, model, name, expand_nested=False): if expand_nested: - name = name + "-expand_nested" + name = f"{name}-expand_nested" TEST_CASES = [ {}, @@ -130,7 +130,7 @@ def multi_plot_model(self, model, name, expand_nested=False): for test_case in TEST_CASES: tags = [v if k == "rankdir" else k for k, v in test_case.items()] - file_name = "-".join([name] + tags) + ".png" + file_name = f"{'-'.join([name] + tags)}.png" plot_model( model, file_name, expand_nested=expand_nested, **test_case ) diff --git a/keras/src/applications/convnext.py b/keras/src/applications/convnext.py index 1b3d683a9b8e..710018f26a36 100644 --- a/keras/src/applications/convnext.py +++ b/keras/src/applications/convnext.py @@ -254,25 +254,25 @@ def apply(inputs): kernel_size=7, padding="same", groups=projection_dim, - name=name + "_depthwise_conv", + name=f"{name}_depthwise_conv", )(x) - x = layers.LayerNormalization(epsilon=1e-6, name=name + "_layernorm")(x) - x = layers.Dense(4 * projection_dim, name=name + "_pointwise_conv_1")(x) - x = layers.Activation("gelu", name=name + "_gelu")(x) - x = layers.Dense(projection_dim, name=name + "_pointwise_conv_2")(x) + x = layers.LayerNormalization(epsilon=1e-6, name=f"{name}_layernorm")(x) + x = layers.Dense(4 * projection_dim, name=f"{name}_pointwise_conv_1")(x) + x = layers.Activation("gelu", name=f"{name}_gelu")(x) + x = layers.Dense(projection_dim, name=f"{name}_pointwise_conv_2")(x) if layer_scale_init_value is not None: x = LayerScale( layer_scale_init_value, projection_dim, - name=name + "_layer_scale", + name=f"{name}_layer_scale", )(x) if drop_path_rate: layer = StochasticDepth( - drop_path_rate, name=name + "_stochastic_depth" + drop_path_rate, name=f"{name}_stochastic_depth" ) else: - layer = layers.Activation("linear", name=name + "_identity") + layer = layers.Activation("linear", name=f"{name}_identity") return inputs + layer(x) @@ -292,7 +292,7 @@ def apply(x): (0.224 * 255) ** 2, (0.225 * 255) ** 2, ], - name=name + "_prestem_normalization", + name=f"{name}_prestem_normalization", )(x) return x @@ -314,14 +314,14 @@ def Head(num_classes=1000, classifier_activation=None, name=None): name = str(backend.get_uid("head")) def apply(x): - x = layers.GlobalAveragePooling2D(name=name + "_head_gap")(x) + x = layers.GlobalAveragePooling2D(name=f"{name}_head_gap")(x) x = layers.LayerNormalization( - epsilon=1e-6, name=name + "_head_layernorm" + epsilon=1e-6, name=f"{name}_head_layernorm" )(x) x = layers.Dense( num_classes, activation=classifier_activation, - name=name + "_head_dense", + name=f"{name}_head_dense", )(x) return x @@ -452,13 +452,13 @@ def ConvNeXt( projection_dims[0], kernel_size=4, strides=4, - name=name + "_stem_conv", + name=f"{name}_stem_conv", ), layers.LayerNormalization( - epsilon=1e-6, name=name + "_stem_layernorm" + epsilon=1e-6, name=f"{name}_stem_layernorm" ), ], - name=name + "_stem", + name=f"{name}_stem", ) # Downsampling blocks. 
@@ -471,16 +471,16 @@ def ConvNeXt(
                 [
                     layers.LayerNormalization(
                         epsilon=1e-6,
-                        name=name + "_downsampling_layernorm_" + str(i),
+                        name=f"{name}_downsampling_layernorm_{i}",
                     ),
                     layers.Conv2D(
                         projection_dims[i + 1],
                         kernel_size=2,
                         strides=2,
-                        name=name + "_downsampling_conv_" + str(i),
+                        name=f"{name}_downsampling_conv_{i}",
                     ),
                 ],
-                name=name + "_downsampling_block_" + str(i),
+                name=f"{name}_downsampling_block_{i}",
             )
             downsample_layers.append(downsample_layer)
diff --git a/keras/src/applications/densenet.py b/keras/src/applications/densenet.py
index 436401d258bf..476256ed3c47 100644
--- a/keras/src/applications/densenet.py
+++ b/keras/src/applications/densenet.py
@@ -10,25 +10,25 @@
     "https://storage.googleapis.com/tensorflow/keras-applications/densenet/"
 )
 DENSENET121_WEIGHT_PATH = (
-    BASE_WEIGHTS_PATH + "densenet121_weights_tf_dim_ordering_tf_kernels.h5"
+    f"{BASE_WEIGHTS_PATH}densenet121_weights_tf_dim_ordering_tf_kernels.h5"
 )
 DENSENET121_WEIGHT_PATH_NO_TOP = (
-    BASE_WEIGHTS_PATH
-    + "densenet121_weights_tf_dim_ordering_tf_kernels_notop.h5"
+    f"{BASE_WEIGHTS_PATH}"
+    "densenet121_weights_tf_dim_ordering_tf_kernels_notop.h5"
 )
 DENSENET169_WEIGHT_PATH = (
-    BASE_WEIGHTS_PATH + "densenet169_weights_tf_dim_ordering_tf_kernels.h5"
+    f"{BASE_WEIGHTS_PATH}densenet169_weights_tf_dim_ordering_tf_kernels.h5"
 )
 DENSENET169_WEIGHT_PATH_NO_TOP = (
-    BASE_WEIGHTS_PATH
-    + "densenet169_weights_tf_dim_ordering_tf_kernels_notop.h5"
+    f"{BASE_WEIGHTS_PATH}"
+    "densenet169_weights_tf_dim_ordering_tf_kernels_notop.h5"
 )
 DENSENET201_WEIGHT_PATH = (
-    BASE_WEIGHTS_PATH + "densenet201_weights_tf_dim_ordering_tf_kernels.h5"
+    f"{BASE_WEIGHTS_PATH}densenet201_weights_tf_dim_ordering_tf_kernels.h5"
 )
 DENSENET201_WEIGHT_PATH_NO_TOP = (
-    BASE_WEIGHTS_PATH
-    + "densenet201_weights_tf_dim_ordering_tf_kernels_notop.h5"
+    f"{BASE_WEIGHTS_PATH}"
+    "densenet201_weights_tf_dim_ordering_tf_kernels_notop.h5"
 )
 
 
@@ -44,7 +44,7 @@ def dense_block(x, blocks, name):
         Output tensor for the block.
     """
     for i in range(blocks):
-        x = conv_block(x, 32, name=name + "_block" + str(i + 1))
+        x = conv_block(x, 32, name=f"{name}_block{i + 1}")
     return x
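An aside on the `_notop` URL constants in the densenet hunk above: they stay split across two adjacent literals so each line fits the 80-column limit. Python joins adjacent string literals, f-strings included, at compile time, so the split form is byte-for-byte identical to `+` concatenation. A minimal sketch (the `BASE` URL here is a stand-in, not the real constant):

    BASE = "https://example.com/keras-applications/densenet/"  # hypothetical
    WEIGHT_PATH_NO_TOP = (
        f"{BASE}"
        "densenet121_weights_tf_dim_ordering_tf_kernels_notop.h5"
    )
    # Adjacent literals concatenate exactly like `+` would.
    assert WEIGHT_PATH_NO_TOP == BASE + (
        "densenet121_weights_tf_dim_ordering_tf_kernels_notop.h5"
    )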
""" for i in range(blocks): - x = conv_block(x, 32, name=name + "_block" + str(i + 1)) + x = conv_block(x, 32, name=f"{name}_block" + str(i + 1)) return x @@ -61,16 +58,16 @@ def transition_block(x, reduction, name): """ bn_axis = 3 if backend.image_data_format() == "channels_last" else 1 x = layers.BatchNormalization( - axis=bn_axis, epsilon=1.001e-5, name=name + "_bn" + axis=bn_axis, epsilon=1.001e-5, name=f"{name}_bn" )(x) - x = layers.Activation("relu", name=name + "_relu")(x) + x = layers.Activation("relu", name=f"{name}_relu")(x) x = layers.Conv2D( int(x.shape[bn_axis] * reduction), 1, use_bias=False, - name=name + "_conv", + name=f"{name}_conv", )(x) - x = layers.AveragePooling2D(2, strides=2, name=name + "_pool")(x) + x = layers.AveragePooling2D(2, strides=2, name=f"{name}_pool")(x) return x @@ -87,20 +84,20 @@ def conv_block(x, growth_rate, name): """ bn_axis = 3 if backend.image_data_format() == "channels_last" else 1 x1 = layers.BatchNormalization( - axis=bn_axis, epsilon=1.001e-5, name=name + "_0_bn" + axis=bn_axis, epsilon=1.001e-5, name=f"{name}_0_bn" )(x) - x1 = layers.Activation("relu", name=name + "_0_relu")(x1) + x1 = layers.Activation("relu", name=f"{name}_0_relu")(x1) x1 = layers.Conv2D( - 4 * growth_rate, 1, use_bias=False, name=name + "_1_conv" + 4 * growth_rate, 1, use_bias=False, name=f"{name}_1_conv" )(x1) x1 = layers.BatchNormalization( - axis=bn_axis, epsilon=1.001e-5, name=name + "_1_bn" + axis=bn_axis, epsilon=1.001e-5, name=f"{name}_1_bn" )(x1) - x1 = layers.Activation("relu", name=name + "_1_relu")(x1) + x1 = layers.Activation("relu", name=f"{name}_1_relu")(x1) x1 = layers.Conv2D( - growth_rate, 3, padding="same", use_bias=False, name=name + "_2_conv" + growth_rate, 3, padding="same", use_bias=False, name=f"{name}_2_conv" )(x1) - x = layers.Concatenate(axis=bn_axis, name=name + "_concat")([x, x1]) + x = layers.Concatenate(axis=bn_axis, name=f"{name}_concat")([x, x1]) return x diff --git a/keras/src/applications/efficientnet.py b/keras/src/applications/efficientnet.py index 2b0229c194a7..44dcad9bc8c2 100644 --- a/keras/src/applications/efficientnet.py +++ b/keras/src/applications/efficientnet.py @@ -479,10 +479,10 @@ def block( padding="same", use_bias=False, kernel_initializer=CONV_KERNEL_INITIALIZER, - name=name + "expand_conv", + name=f"{name}expand_conv", )(inputs) - x = layers.BatchNormalization(axis=bn_axis, name=name + "expand_bn")(x) - x = layers.Activation(activation, name=name + "expand_activation")(x) + x = layers.BatchNormalization(axis=bn_axis, name=f"{name}expand_bn")(x) + x = layers.Activation(activation, name=f"{name}expand_activation")(x) else: x = inputs @@ -490,7 +490,7 @@ def block( if strides == 2: x = layers.ZeroPadding2D( padding=imagenet_utils.correct_pad(x, kernel_size), - name=name + "dwconv_pad", + name=f"{name}dwconv_pad", )(x) conv_pad = "valid" else: @@ -501,27 +501,27 @@ def block( padding=conv_pad, use_bias=False, depthwise_initializer=CONV_KERNEL_INITIALIZER, - name=name + "dwconv", + name=f"{name}dwconv", )(x) - x = layers.BatchNormalization(axis=bn_axis, name=name + "bn")(x) - x = layers.Activation(activation, name=name + "activation")(x) + x = layers.BatchNormalization(axis=bn_axis, name=f"{name}bn")(x) + x = layers.Activation(activation, name=f"{name}activation")(x) # Squeeze and Excitation phase if 0 < se_ratio <= 1: filters_se = max(1, int(filters_in * se_ratio)) - se = layers.GlobalAveragePooling2D(name=name + "se_squeeze")(x) + se = layers.GlobalAveragePooling2D(name=f"{name}se_squeeze")(x) if bn_axis == 1: se_shape = 
(filters, 1, 1) else: se_shape = (1, 1, filters) - se = layers.Reshape(se_shape, name=name + "se_reshape")(se) + se = layers.Reshape(se_shape, name=f"{name}se_reshape")(se) se = layers.Conv2D( filters_se, 1, padding="same", activation=activation, kernel_initializer=CONV_KERNEL_INITIALIZER, - name=name + "se_reduce", + name=f"{name}se_reduce", )(se) se = layers.Conv2D( filters, @@ -529,9 +529,9 @@ def block( padding="same", activation="sigmoid", kernel_initializer=CONV_KERNEL_INITIALIZER, - name=name + "se_expand", + name=f"{name}se_expand", )(se) - x = layers.multiply([x, se], name=name + "se_excite") + x = layers.multiply([x, se], name=f"{name}se_excite") # Output phase x = layers.Conv2D( @@ -540,15 +540,15 @@ def block( padding="same", use_bias=False, kernel_initializer=CONV_KERNEL_INITIALIZER, - name=name + "project_conv", + name=f"{name}project_conv", )(x) - x = layers.BatchNormalization(axis=bn_axis, name=name + "project_bn")(x) + x = layers.BatchNormalization(axis=bn_axis, name=f"{name}project_bn")(x) if id_skip and strides == 1 and filters_in == filters_out: if drop_rate > 0: x = layers.Dropout( - drop_rate, noise_shape=(None, 1, 1, 1), name=name + "drop" + drop_rate, noise_shape=(None, 1, 1, 1), name=f"{name}drop" )(x) - x = layers.add([x, inputs], name=name + "add") + x = layers.add([x, inputs], name=f"{name}add") return x diff --git a/keras/src/applications/efficientnet_v2.py b/keras/src/applications/efficientnet_v2.py index b0b59470b349..86e8e2827844 100644 --- a/keras/src/applications/efficientnet_v2.py +++ b/keras/src/applications/efficientnet_v2.py @@ -632,14 +632,14 @@ def apply(inputs): padding="same", data_format=backend.image_data_format(), use_bias=False, - name=name + "expand_conv", + name=f"{name}expand_conv", )(inputs) x = layers.BatchNormalization( axis=bn_axis, momentum=bn_momentum, - name=name + "expand_bn", + name=f"{name}expand_bn", )(x) - x = layers.Activation(activation, name=name + "expand_activation")( + x = layers.Activation(activation, name=f"{name}expand_activation")( x ) else: @@ -653,22 +653,22 @@ def apply(inputs): padding="same", data_format=backend.image_data_format(), use_bias=False, - name=name + "dwconv2", + name=f"{name}dwconv2", )(x) x = layers.BatchNormalization( - axis=bn_axis, momentum=bn_momentum, name=name + "bn" + axis=bn_axis, momentum=bn_momentum, name=f"{name}bn" )(x) - x = layers.Activation(activation, name=name + "activation")(x) + x = layers.Activation(activation, name=f"{name}activation")(x) # Squeeze and excite if 0 < se_ratio <= 1: filters_se = max(1, int(input_filters * se_ratio)) - se = layers.GlobalAveragePooling2D(name=name + "se_squeeze")(x) + se = layers.GlobalAveragePooling2D(name=f"{name}se_squeeze")(x) if bn_axis == 1: se_shape = (filters, 1, 1) else: se_shape = (1, 1, filters) - se = layers.Reshape(se_shape, name=name + "se_reshape")(se) + se = layers.Reshape(se_shape, name=f"{name}se_reshape")(se) se = layers.Conv2D( filters_se, @@ -676,7 +676,7 @@ def apply(inputs): padding="same", activation=activation, kernel_initializer=CONV_KERNEL_INITIALIZER, - name=name + "se_reduce", + name=f"{name}se_reduce", )(se) se = layers.Conv2D( filters, @@ -684,10 +684,10 @@ def apply(inputs): padding="same", activation="sigmoid", kernel_initializer=CONV_KERNEL_INITIALIZER, - name=name + "se_expand", + name=f"{name}se_expand", )(se) - x = layers.multiply([x, se], name=name + "se_excite") + x = layers.multiply([x, se], name=f"{name}se_excite") # Output phase x = layers.Conv2D( @@ -698,10 +698,10 @@ def apply(inputs): padding="same", 
data_format=backend.image_data_format(), use_bias=False, - name=name + "project_conv", + name=f"{name}project_conv", )(x) x = layers.BatchNormalization( - axis=bn_axis, momentum=bn_momentum, name=name + "project_bn" + axis=bn_axis, momentum=bn_momentum, name=f"{name}project_bn" )(x) if strides == 1 and input_filters == output_filters: @@ -709,9 +709,9 @@ def apply(inputs): x = layers.Dropout( survival_probability, noise_shape=(None, 1, 1, 1), - name=name + "drop", + name=f"{name}drop", )(x) - x = layers.add([x, inputs], name=name + "add") + x = layers.add([x, inputs], name=f"{name}add") return x @@ -747,13 +747,13 @@ def apply(inputs): data_format=backend.image_data_format(), padding="same", use_bias=False, - name=name + "expand_conv", + name=f"{name}expand_conv", )(inputs) x = layers.BatchNormalization( - axis=bn_axis, momentum=bn_momentum, name=name + "expand_bn" + axis=bn_axis, momentum=bn_momentum, name=f"{name}expand_bn" )(x) x = layers.Activation( - activation=activation, name=name + "expand_activation" + activation=activation, name=f"{name}expand_activation" )(x) else: x = inputs @@ -761,13 +761,13 @@ def apply(inputs): # Squeeze and excite if 0 < se_ratio <= 1: filters_se = max(1, int(input_filters * se_ratio)) - se = layers.GlobalAveragePooling2D(name=name + "se_squeeze")(x) + se = layers.GlobalAveragePooling2D(name=f"{name}se_squeeze")(x) if bn_axis == 1: se_shape = (filters, 1, 1) else: se_shape = (1, 1, filters) - se = layers.Reshape(se_shape, name=name + "se_reshape")(se) + se = layers.Reshape(se_shape, name=f"{name}se_reshape")(se) se = layers.Conv2D( filters_se, @@ -775,7 +775,7 @@ def apply(inputs): padding="same", activation=activation, kernel_initializer=CONV_KERNEL_INITIALIZER, - name=name + "se_reduce", + name=f"{name}se_reduce", )(se) se = layers.Conv2D( filters, @@ -783,10 +783,10 @@ def apply(inputs): padding="same", activation="sigmoid", kernel_initializer=CONV_KERNEL_INITIALIZER, - name=name + "se_expand", + name=f"{name}se_expand", )(se) - x = layers.multiply([x, se], name=name + "se_excite") + x = layers.multiply([x, se], name=f"{name}se_excite") # Output phase: x = layers.Conv2D( @@ -796,14 +796,14 @@ def apply(inputs): kernel_initializer=CONV_KERNEL_INITIALIZER, padding="same", use_bias=False, - name=name + "project_conv", + name=f"{name}project_conv", )(x) x = layers.BatchNormalization( - axis=bn_axis, momentum=bn_momentum, name=name + "project_bn" + axis=bn_axis, momentum=bn_momentum, name=f"{name}project_bn" )(x) if expand_ratio == 1: x = layers.Activation( - activation=activation, name=name + "project_activation" + activation=activation, name=f"{name}project_activation" )(x) # Residual: @@ -812,9 +812,9 @@ def apply(inputs): x = layers.Dropout( survival_probability, noise_shape=(None, 1, 1, 1), - name=name + "drop", + name=f"{name}drop", )(x) - x = layers.add([x, inputs], name=name + "add") + x = layers.add([x, inputs], name=f"{name}add") return x return apply diff --git a/keras/src/applications/inception_resnet_v2.py b/keras/src/applications/inception_resnet_v2.py index 422a1899d75d..f93fdbe5e8b6 100644 --- a/keras/src/applications/inception_resnet_v2.py +++ b/keras/src/applications/inception_resnet_v2.py @@ -281,12 +281,12 @@ def conv2d_bn( )(x) if not use_bias: bn_axis = 1 if backend.image_data_format() == "channels_first" else 3 - bn_name = None if name is None else name + "_bn" + bn_name = None if name is None else f"{name}_bn" x = layers.BatchNormalization(axis=bn_axis, scale=False, name=bn_name)( x ) if activation is not None: - ac_name = None if 
name is None else name + "_ac"
+            ac_name = None if name is None else f"{name}_ac"
             x = layers.Activation(activation, name=ac_name)(x)
     return x
 
@@ -356,9 +356,9 @@ def inception_resnet_block(x, scale, block_type, block_idx, activation="relu"):
             "but got: " + str(block_type)
         )
 
-    block_name = block_type + "_" + str(block_idx)
+    block_name = f"{block_type}_{block_idx}"
     channel_axis = 1 if backend.image_data_format() == "channels_first" else 3
-    mixed = layers.Concatenate(axis=channel_axis, name=block_name + "_mixed")(
+    mixed = layers.Concatenate(axis=channel_axis, name=f"{block_name}_mixed")(
         branches
     )
     up = conv2d_bn(
@@ -367,12 +367,12 @@ def inception_resnet_block(x, scale, block_type, block_idx, activation="relu"):
         1,
         activation=None,
         use_bias=True,
-        name=block_name + "_conv",
+        name=f"{block_name}_conv",
     )
 
     x = CustomScaleLayer(scale)([x, up])
     if activation is not None:
-        x = layers.Activation(activation, name=block_name + "_ac")(x)
+        x = layers.Activation(activation, name=f"{block_name}_ac")(x)
     return x
 
diff --git a/keras/src/applications/inception_v3.py b/keras/src/applications/inception_v3.py
index bde5a34da7f4..c1b17bc3f1a1 100644
--- a/keras/src/applications/inception_v3.py
+++ b/keras/src/applications/inception_v3.py
@@ -400,8 +400,8 @@ def conv2d_bn(
         Output tensor after applying `Conv2D` and `BatchNormalization`.
     """
     if name is not None:
-        bn_name = name + "_bn"
-        conv_name = name + "_conv"
+        bn_name = f"{name}_bn"
+        conv_name = f"{name}_conv"
     else:
         bn_name = None
         conv_name = None
diff --git a/keras/src/applications/mobilenet_v2.py b/keras/src/applications/mobilenet_v2.py
index 1b4c3a1df1a1..b97aada53754 100644
--- a/keras/src/applications/mobilenet_v2.py
+++ b/keras/src/applications/mobilenet_v2.py
@@ -369,11 +369,8 @@ def MobileNetV2(
     if weights == "imagenet":
         if include_top:
             model_name = (
                 "mobilenet_v2_weights_tf_dim_ordering_tf_kernels_"
-                + str(float(alpha))
-                + "_"
-                + str(rows)
-                + ".h5"
+                f"{float(alpha)}_{rows}.h5"
             )
             weight_path = BASE_WEIGHT_PATH + model_name
             weights_path = file_utils.get_file(
@@ -381,12 +378,8 @@ def MobileNetV2(
             )
         else:
             model_name = (
                 "mobilenet_v2_weights_tf_dim_ordering_tf_kernels_"
-                + str(float(alpha))
-                + "_"
-                + str(rows)
-                + "_no_top"
-                + ".h5"
+                f"{float(alpha)}_{rows}_no_top.h5"
             )
             weight_path = BASE_WEIGHT_PATH + model_name
             weights_path = file_utils.get_file(
@@ -419,22 +412,22 @@ def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id):
             padding="same",
             use_bias=False,
             activation=None,
-            name=prefix + "expand",
+            name=f"{prefix}expand",
         )(x)
         x = layers.BatchNormalization(
             axis=channel_axis,
             epsilon=1e-3,
             momentum=0.999,
-            name=prefix + "expand_BN",
+            name=f"{prefix}expand_BN",
         )(x)
-        x = layers.ReLU(6.0, name=prefix + "expand_relu")(x)
+        x = layers.ReLU(6.0, name=f"{prefix}expand_relu")(x)
     else:
         prefix = "expanded_conv_"
 
     # Depthwise 3x3 convolution.
if stride == 2: x = layers.ZeroPadding2D( - padding=imagenet_utils.correct_pad(x, 3), name=prefix + "pad" + padding=imagenet_utils.correct_pad(x, 3), name=f"{prefix}pad" )(x) x = layers.DepthwiseConv2D( kernel_size=3, @@ -442,16 +433,16 @@ def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id): activation=None, use_bias=False, padding="same" if stride == 1 else "valid", - name=prefix + "depthwise", + name=f"{prefix}depthwise", )(x) x = layers.BatchNormalization( axis=channel_axis, epsilon=1e-3, momentum=0.999, - name=prefix + "depthwise_BN", + name=f"{prefix}depthwise_BN", )(x) - x = layers.ReLU(6.0, name=prefix + "depthwise_relu")(x) + x = layers.ReLU(6.0, name=f"{prefix}depthwise_relu")(x) # Project with a pointwise 1x1 convolution. x = layers.Conv2D( @@ -460,17 +451,17 @@ def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id): padding="same", use_bias=False, activation=None, - name=prefix + "project", + name=f"{prefix}project", )(x) x = layers.BatchNormalization( axis=channel_axis, epsilon=1e-3, momentum=0.999, - name=prefix + "project_BN", + name=f"{prefix}project_BN", )(x) if in_channels == pointwise_filters and stride == 1: - return layers.Add(name=prefix + "add")([inputs, x]) + return layers.Add(name=f"{prefix}add")([inputs, x]) return x diff --git a/keras/src/applications/mobilenet_v3.py b/keras/src/applications/mobilenet_v3.py index 972ae8d4323b..94efb7715c46 100644 --- a/keras/src/applications/mobilenet_v3.py +++ b/keras/src/applications/mobilenet_v3.py @@ -383,10 +383,10 @@ def MobileNetV3( model_type, "_minimalistic" if minimalistic else "", str(alpha) ) if include_top: - file_name = "weights_mobilenet_v3_" + model_name + ".h5" + file_name = f"weights_mobilenet_v3_{model_name}.h5" file_hash = WEIGHTS_HASHES[model_name][0] else: - file_name = "weights_mobilenet_v3_" + model_name + "_no_top_v2.h5" + file_name = f"weights_mobilenet_v3_{model_name}_no_top_v2.h5" file_hash = WEIGHTS_HASHES[model_name][1] weights_path = file_utils.get_file( file_name, @@ -568,23 +568,23 @@ def _depth(v, divisor=8, min_value=None): def _se_block(inputs, filters, se_ratio, prefix): x = layers.GlobalAveragePooling2D( - keepdims=True, name=prefix + "squeeze_excite_avg_pool" + keepdims=True, name=f"{prefix}squeeze_excite_avg_pool" )(inputs) x = layers.Conv2D( _depth(filters * se_ratio), kernel_size=1, padding="same", - name=prefix + "squeeze_excite_conv", + name=f"{prefix}squeeze_excite_conv", )(x) - x = layers.ReLU(name=prefix + "squeeze_excite_relu")(x) + x = layers.ReLU(name=f"{prefix}squeeze_excite_relu")(x) x = layers.Conv2D( filters, kernel_size=1, padding="same", - name=prefix + "squeeze_excite_conv_1", + name=f"{prefix}squeeze_excite_conv_1", )(x) x = hard_sigmoid(x) - x = layers.Multiply(name=prefix + "squeeze_excite_mul")([inputs, x]) + x = layers.Multiply(name=f"{prefix}squeeze_excite_mul")([inputs, x]) return x @@ -603,33 +603,33 @@ def _inverted_res_block( kernel_size=1, padding="same", use_bias=False, - name=prefix + "expand", + name=f"{prefix}expand", )(x) x = layers.BatchNormalization( axis=channel_axis, epsilon=1e-3, momentum=0.999, - name=prefix + "expand_bn", + name=f"{prefix}expand_bn", )(x) x = activation(x) if stride == 2: x = layers.ZeroPadding2D( padding=imagenet_utils.correct_pad(x, kernel_size), - name=prefix + "depthwise_pad", + name=f"{prefix}depthwise_pad", )(x) x = layers.DepthwiseConv2D( kernel_size, strides=stride, padding="same" if stride == 1 else "valid", use_bias=False, - name=prefix + "depthwise", + name=f"{prefix}depthwise", 
)(x)
     x = layers.BatchNormalization(
         axis=channel_axis,
         epsilon=1e-3,
         momentum=0.999,
-        name=prefix + "depthwise_bn",
+        name=f"{prefix}depthwise_bn",
     )(x)
     x = activation(x)
 
@@ -641,17 +641,17 @@ def _inverted_res_block(
         kernel_size=1,
         padding="same",
         use_bias=False,
-        name=prefix + "project",
+        name=f"{prefix}project",
     )(x)
     x = layers.BatchNormalization(
         axis=channel_axis,
         epsilon=1e-3,
         momentum=0.999,
-        name=prefix + "project_bn",
+        name=f"{prefix}project_bn",
     )(x)
 
     if stride == 1 and infilters == filters:
-        x = layers.Add(name=prefix + "add")([shortcut, x])
+        x = layers.Add(name=f"{prefix}add")([shortcut, x])
     return x
 
diff --git a/keras/src/applications/nasnet.py b/keras/src/applications/nasnet.py
index b08f9bac6e21..0f935611bc6d 100644
--- a/keras/src/applications/nasnet.py
+++ b/keras/src/applications/nasnet.py
@@ -11,10 +11,10 @@
 BASE_WEIGHTS_PATH = (
     "https://storage.googleapis.com/tensorflow/keras-applications/nasnet/"
 )
-NASNET_MOBILE_WEIGHT_PATH = BASE_WEIGHTS_PATH + "NASNet-mobile.h5"
-NASNET_MOBILE_WEIGHT_PATH_NO_TOP = BASE_WEIGHTS_PATH + "NASNet-mobile-no-top.h5"
-NASNET_LARGE_WEIGHT_PATH = BASE_WEIGHTS_PATH + "NASNet-large.h5"
-NASNET_LARGE_WEIGHT_PATH_NO_TOP = BASE_WEIGHTS_PATH + "NASNet-large-no-top.h5"
+NASNET_MOBILE_WEIGHT_PATH = f"{BASE_WEIGHTS_PATH}NASNet-mobile.h5"
+NASNET_MOBILE_WEIGHT_PATH_NO_TOP = f"{BASE_WEIGHTS_PATH}NASNet-mobile-no-top.h5"
+NASNET_LARGE_WEIGHT_PATH = f"{BASE_WEIGHTS_PATH}NASNet-large.h5"
+NASNET_LARGE_WEIGHT_PATH_NO_TOP = f"{BASE_WEIGHTS_PATH}NASNet-large-no-top.h5"
 
 
 def NASNet(
@@ -137,10 +137,10 @@ def NASNet(
         and weights == "imagenet"
     ):
         raise ValueError(
-            "When specifying the input shape of a NASNet"
-            " and loading `ImageNet` weights, "
-            "the input_shape argument must be static "
-            "(no None entries). Got: `input_shape=" + str(input_shape) + "`."
+            "When specifying the input shape of a NASNet "
+            "and loading `ImageNet` weights, the input_shape "
+            "argument must be static (no None entries). "
+            f"Got: `input_shape={input_shape}`."
         )
 
     if default_size is None:
diff --git a/keras/src/applications/resnet.py b/keras/src/applications/resnet.py
index 0948f8901db1..7e7f11576bc4 100644
--- a/keras/src/applications/resnet.py
+++ b/keras/src/applications/resnet.py
@@ -196,16 +196,16 @@ def ResNet(
     # Load weights.
if (weights == "imagenet") and (weights_name in WEIGHTS_HASHES): if include_top: - file_name = weights_name + "_weights_tf_dim_ordering_tf_kernels.h5" + file_name = f"{weights_name}_weights_tf_dim_ordering_tf_kernels.h5" file_hash = WEIGHTS_HASHES[weights_name][0] else: file_name = ( - weights_name + "_weights_tf_dim_ordering_tf_kernels_notop.h5" + f"{weights_name}_weights_tf_dim_ordering_tf_kernels_notop.h5" ) file_hash = WEIGHTS_HASHES[weights_name][1] weights_path = file_utils.get_file( file_name, - BASE_WEIGHTS_PATH + file_name, + f"{BASE_WEIGHTS_PATH}{file_name}", cache_subdir="models", file_hash=file_hash, ) @@ -241,35 +241,35 @@ def residual_block_v1( if conv_shortcut: shortcut = layers.Conv2D( - 4 * filters, 1, strides=stride, name=name + "_0_conv" + 4 * filters, 1, strides=stride, name=f"{name}_0_conv" )(x) shortcut = layers.BatchNormalization( - axis=bn_axis, epsilon=1.001e-5, name=name + "_0_bn" + axis=bn_axis, epsilon=1.001e-5, name=f"{name}_0_bn" )(shortcut) else: shortcut = x - x = layers.Conv2D(filters, 1, strides=stride, name=name + "_1_conv")(x) + x = layers.Conv2D(filters, 1, strides=stride, name=f"{name}_1_conv")(x) x = layers.BatchNormalization( - axis=bn_axis, epsilon=1.001e-5, name=name + "_1_bn" + axis=bn_axis, epsilon=1.001e-5, name=f"{name}_1_bn" )(x) - x = layers.Activation("relu", name=name + "_1_relu")(x) + x = layers.Activation("relu", name=f"{name}_1_relu")(x) x = layers.Conv2D( - filters, kernel_size, padding="SAME", name=name + "_2_conv" + filters, kernel_size, padding="SAME", name=f"{name}_2_conv" )(x) x = layers.BatchNormalization( - axis=bn_axis, epsilon=1.001e-5, name=name + "_2_bn" + axis=bn_axis, epsilon=1.001e-5, name=f"{name}_2_bn" )(x) - x = layers.Activation("relu", name=name + "_2_relu")(x) + x = layers.Activation("relu", name=f"{name}_2_relu")(x) - x = layers.Conv2D(4 * filters, 1, name=name + "_3_conv")(x) + x = layers.Conv2D(4 * filters, 1, name=f"{name}_3_conv")(x) x = layers.BatchNormalization( - axis=bn_axis, epsilon=1.001e-5, name=name + "_3_bn" + axis=bn_axis, epsilon=1.001e-5, name=f"{name}_3_bn" )(x) - x = layers.Add(name=name + "_add")([shortcut, x]) - x = layers.Activation("relu", name=name + "_out")(x) + x = layers.Add(name=f"{name}_add")([shortcut, x]) + x = layers.Activation("relu", name=f"{name}_out")(x) return x @@ -287,10 +287,10 @@ def stack_residual_blocks_v1(x, filters, blocks, stride1=2, name=None): Output tensor for the stacked blocks. 
""" - x = residual_block_v1(x, filters, stride=stride1, name=name + "_block1") + x = residual_block_v1(x, filters, stride=stride1, name=f"{name}_block1") for i in range(2, blocks + 1): x = residual_block_v1( - x, filters, conv_shortcut=False, name=name + "_block" + str(i) + x, filters, conv_shortcut=False, name=f"{name}_block" + str(i) ) return x @@ -319,13 +319,13 @@ def residual_block_v2( bn_axis = 1 preact = layers.BatchNormalization( - axis=bn_axis, epsilon=1.001e-5, name=name + "_preact_bn" + axis=bn_axis, epsilon=1.001e-5, name=f"{name}_preact_bn" )(x) - preact = layers.Activation("relu", name=name + "_preact_relu")(preact) + preact = layers.Activation("relu", name=f"{name}_preact_relu")(preact) if conv_shortcut: shortcut = layers.Conv2D( - 4 * filters, 1, strides=stride, name=name + "_0_conv" + 4 * filters, 1, strides=stride, name=f"{name}_0_conv" )(preact) else: shortcut = ( @@ -333,28 +333,28 @@ def residual_block_v2( ) x = layers.Conv2D( - filters, 1, strides=1, use_bias=False, name=name + "_1_conv" + filters, 1, strides=1, use_bias=False, name=f"{name}_1_conv" )(preact) x = layers.BatchNormalization( - axis=bn_axis, epsilon=1.001e-5, name=name + "_1_bn" + axis=bn_axis, epsilon=1.001e-5, name=f"{name}_1_bn" )(x) - x = layers.Activation("relu", name=name + "_1_relu")(x) + x = layers.Activation("relu", name=f"{name}_1_relu")(x) - x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name=name + "_2_pad")(x) + x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name=f"{name}_2_pad")(x) x = layers.Conv2D( filters, kernel_size, strides=stride, use_bias=False, - name=name + "_2_conv", + name=f"{name}_2_conv", )(x) x = layers.BatchNormalization( - axis=bn_axis, epsilon=1.001e-5, name=name + "_2_bn" + axis=bn_axis, epsilon=1.001e-5, name=f"{name}_2_bn" )(x) - x = layers.Activation("relu", name=name + "_2_relu")(x) + x = layers.Activation("relu", name=f"{name}_2_relu")(x) - x = layers.Conv2D(4 * filters, 1, name=name + "_3_conv")(x) - x = layers.Add(name=name + "_out")([shortcut, x]) + x = layers.Conv2D(4 * filters, 1, name=f"{name}_3_conv")(x) + x = layers.Add(name=f"{name}_out")([shortcut, x]) return x @@ -372,11 +372,11 @@ def stack_residual_blocks_v2(x, filters, blocks, stride1=2, name=None): Output tensor for the stacked blocks. 
""" - x = residual_block_v2(x, filters, conv_shortcut=True, name=name + "_block1") + x = residual_block_v2(x, filters, conv_shortcut=True, name=f"{name}_block1") for i in range(2, blocks): - x = residual_block_v2(x, filters, name=name + "_block" + str(i)) + x = residual_block_v2(x, filters, name=f"{name}_block" + str(i)) x = residual_block_v2( - x, filters, stride=stride1, name=name + "_block" + str(blocks) + x, filters, stride=stride1, name=f"{name}_block" + str(blocks) ) return x diff --git a/keras/src/applications/xception.py b/keras/src/applications/xception.py index 2464d45ae2a2..1d632f897ebc 100644 --- a/keras/src/applications/xception.py +++ b/keras/src/applications/xception.py @@ -214,38 +214,38 @@ def Xception( residual = x prefix = "block" + str(i + 5) - x = layers.Activation("relu", name=prefix + "_sepconv1_act")(x) + x = layers.Activation("relu", name=f"{prefix}_sepconv1_act")(x) x = layers.SeparableConv2D( 728, (3, 3), padding="same", use_bias=False, - name=prefix + "_sepconv1", + name=f"{prefix}_sepconv1", )(x) x = layers.BatchNormalization( - axis=channel_axis, name=prefix + "_sepconv1_bn" + axis=channel_axis, name=f"{prefix}_sepconv1_bn" )(x) - x = layers.Activation("relu", name=prefix + "_sepconv2_act")(x) + x = layers.Activation("relu", name=f"{prefix}_sepconv2_act")(x) x = layers.SeparableConv2D( 728, (3, 3), padding="same", use_bias=False, - name=prefix + "_sepconv2", + name=f"{prefix}_sepconv2", )(x) x = layers.BatchNormalization( - axis=channel_axis, name=prefix + "_sepconv2_bn" + axis=channel_axis, name=f"{prefix}_sepconv2_bn" )(x) - x = layers.Activation("relu", name=prefix + "_sepconv3_act")(x) + x = layers.Activation("relu", name=f"{prefix}_sepconv3_act")(x) x = layers.SeparableConv2D( 728, (3, 3), padding="same", use_bias=False, - name=prefix + "_sepconv3", + name=f"{prefix}_sepconv3", )(x) x = layers.BatchNormalization( - axis=channel_axis, name=prefix + "_sepconv3_bn" + axis=channel_axis, name=f"{prefix}_sepconv3_bn" )(x) x = layers.add([x, residual]) diff --git a/keras/src/backend/common/variables.py b/keras/src/backend/common/variables.py index d40b67e06174..36e5305d6faf 100644 --- a/keras/src/backend/common/variables.py +++ b/keras/src/backend/common/variables.py @@ -142,7 +142,7 @@ def __init__( self._name = name parent_path = current_path() if parent_path: - self._path = current_path() + "/" + name + self._path = os.path.join(current_path(), name) else: self._path = name self._shape = None diff --git a/keras/src/backend/jax/distribution_lib_test.py b/keras/src/backend/jax/distribution_lib_test.py index f79173522ba9..fa4b72e88cc8 100644 --- a/keras/src/backend/jax/distribution_lib_test.py +++ b/keras/src/backend/jax/distribution_lib_test.py @@ -24,7 +24,7 @@ # Don't override user-specified device count, or other XLA flags. 
if "xla_force_host_platform_device_count" not in xla_flags: os.environ["XLA_FLAGS"] = ( - xla_flags + " --xla_force_host_platform_device_count=8" + f"{xla_flags} --xla_force_host_platform_device_count=8" ) diff --git a/keras/src/backend/jax/export.py b/keras/src/backend/jax/export.py index a50562f6729f..71f0d88a5768 100644 --- a/keras/src/backend/jax/export.py +++ b/keras/src/backend/jax/export.py @@ -159,7 +159,7 @@ def convert_shape(x): poly_shape.append("batch") else: poly_shape.append(next(dim_names)) - return "(" + ", ".join(poly_shape) + ")" + return f"({', '.join(poly_shape)})" return tree.map_structure(convert_shape, struct) diff --git a/keras/src/callbacks/backup_and_restore.py b/keras/src/callbacks/backup_and_restore.py index 5e0b9524edbc..6e3add89eb6e 100644 --- a/keras/src/callbacks/backup_and_restore.py +++ b/keras/src/callbacks/backup_and_restore.py @@ -99,10 +99,8 @@ def __init__( self._training_metadata_path = file_utils.join( backup_dir, "training_metadata.json" ) - self._prev_weights_path = self._weights_path + ".bkp" - self._prev_training_metadata_path = ( - self._training_metadata_path + ".bkp" - ) + self._prev_weights_path = f"{self._weights_path}.bkp" + self._prev_training_metadata_path = f"{self._training_metadata_path}.bkp" if save_freq != "epoch" and not isinstance(save_freq, int): raise ValueError( "Invalid value for argument `save_freq`. " diff --git a/keras/src/callbacks/model_checkpoint.py b/keras/src/callbacks/model_checkpoint.py index f4315de973f6..5c2b84968540 100644 --- a/keras/src/callbacks/model_checkpoint.py +++ b/keras/src/callbacks/model_checkpoint.py @@ -373,7 +373,7 @@ def _get_most_recently_modified_file_matching_pattern(self, pattern): """ dir_name = os.path.dirname(pattern) base_name = os.path.basename(pattern) - base_name_regex = "^" + re.sub(r"{.*}", r".*", base_name) + "$" + base_name_regex = f"^{re.sub(r'{.*}', r'.*', base_name)}$" latest_mod_time = 0 file_path_with_latest_mod_time = None diff --git a/keras/src/callbacks/swap_ema_weights_test.py b/keras/src/callbacks/swap_ema_weights_test.py index 4c83f3bf5a70..63149fd3b3ce 100644 --- a/keras/src/callbacks/swap_ema_weights_test.py +++ b/keras/src/callbacks/swap_ema_weights_test.py @@ -1,3 +1,4 @@ +import os.path import tempfile import pytest @@ -107,11 +108,11 @@ def test_swap_ema_weights_on_epoch(self): epochs=2, callbacks=[ callbacks.SwapEMAWeights(swap_on_epoch=True), - callbacks.ModelCheckpoint(temp_dir + "/{epoch:1d}.keras"), + callbacks.ModelCheckpoint(os.path.join(temp_dir, "{epoch:1d}.keras")), ], validation_data=(self.x_train, self.y_train), ) - model2 = saving.load_model(temp_dir + "/2.keras") + model2 = saving.load_model(os.path.join(temp_dir, "2.keras")) logs = model.evaluate(self.x_train, self.y_train, return_dict=True) logs2 = model2.evaluate(self.x_train, self.y_train, return_dict=True) @@ -166,12 +167,12 @@ def test_swap_ema_weights_with_tf_distribute(self): callbacks=[ callbacks.SwapEMAWeights(swap_on_epoch=True), callbacks.ModelCheckpoint( - temp_dir + "/distributed_{epoch:1d}.keras" + os.path.join(temp_dir, "distributed_{epoch:1d}.keras") ), ], validation_data=(self.x_train, self.y_train), ) - model2 = saving.load_model(temp_dir + "/distributed_2.keras") + model2 = saving.load_model(os.path.join(temp_dir, "distributed_2.keras")) logs = model.evaluate(self.x_train, self.y_train, return_dict=True) logs2 = model2.evaluate(self.x_train, self.y_train, return_dict=True) # saved checkpoint will be applied by EMA weights diff --git a/keras/src/callbacks/tensorboard.py 
b/keras/src/callbacks/tensorboard.py index eef8b15a2273..5f266b3598d3 100644 --- a/keras/src/callbacks/tensorboard.py +++ b/keras/src/callbacks/tensorboard.py @@ -424,7 +424,7 @@ def on_test_end(self, logs=None): with self._val_writer.as_default(): for name, value in logs.items(): self.summary.scalar( - "evaluation_" + name + "_vs_iterations", + f"evaluation_{name}_vs_iterations", value, step=self.model.optimizer.iterations, ) @@ -562,14 +562,14 @@ def _log_weights(self, epoch): for weight in layer.weights: weight_name = weight.name.replace(":", "_") # Add a suffix to prevent summary tag name collision. - histogram_weight_name = weight_name + "/histogram" + histogram_weight_name = f"{weight_name}/histogram" self.summary.histogram( histogram_weight_name, weight, step=epoch ) if self.write_images: # Add a suffix to prevent summary tag name # collision. - image_weight_name = weight_name + "/image" + image_weight_name = f"{weight_name}/image" self._log_weight_as_image( weight, image_weight_name, epoch ) diff --git a/keras/src/datasets/boston_housing.py b/keras/src/datasets/boston_housing.py index de6133a223d2..7864ea126b3b 100644 --- a/keras/src/datasets/boston_housing.py +++ b/keras/src/datasets/boston_housing.py @@ -48,7 +48,7 @@ def load_data(path="boston_housing.npz", test_split=0.2, seed=113): ) path = get_file( path, - origin=origin_folder + "boston_housing.npz", + origin=f"{origin_folder}boston_housing.npz", file_hash=( # noqa: E501 "f553886a1f8d56431e820c5b82552d9d95cfcb96d1e678153f8839538947dff5" ), diff --git a/keras/src/datasets/california_housing.py b/keras/src/datasets/california_housing.py index 467d196a720d..f93a8f47be15 100644 --- a/keras/src/datasets/california_housing.py +++ b/keras/src/datasets/california_housing.py @@ -73,7 +73,7 @@ def load_data( ) path = get_file( path, - origin=origin_folder + "california_housing.npz", + origin=f"{origin_folder}california_housing.npz", file_hash=( # noqa: E501 "1a2e3a52e0398de6463aebe6f4a8da34fb21fbb6b934cf88c3425e766f2a1a6f" ), diff --git a/keras/src/datasets/cifar100.py b/keras/src/datasets/cifar100.py index e27421a6cf0e..7576afd89878 100644 --- a/keras/src/datasets/cifar100.py +++ b/keras/src/datasets/cifar100.py @@ -71,10 +71,10 @@ def load_data(label_mode="fine"): path = os.path.join(path, "cifar-100-python") fpath = os.path.join(path, "train") - x_train, y_train = load_batch(fpath, label_key=label_mode + "_labels") + x_train, y_train = load_batch(fpath, label_key=f"{label_mode}_labels") fpath = os.path.join(path, "test") - x_test, y_test = load_batch(fpath, label_key=label_mode + "_labels") + x_test, y_test = load_batch(fpath, label_key=f"{label_mode}_labels") y_train = np.reshape(y_train, (len(y_train), 1)) y_test = np.reshape(y_test, (len(y_test), 1)) diff --git a/keras/src/datasets/imdb.py b/keras/src/datasets/imdb.py index f38dfaf0a158..753d7474cd54 100644 --- a/keras/src/datasets/imdb.py +++ b/keras/src/datasets/imdb.py @@ -78,7 +78,7 @@ def load_data( ) path = get_file( fname=path, - origin=origin_folder + "imdb.npz", + origin=f"{origin_folder}imdb.npz", file_hash=( # noqa: E501 "69664113be75683a8fe16e3ed0ab59fda8886cb3cd7ada244f7d9544e4676b9f" ), @@ -181,7 +181,7 @@ def get_word_index(path="imdb_word_index.json"): ) path = get_file( fname=path, - origin=origin_folder + "imdb_word_index.json", + origin=f"{origin_folder}imdb_word_index.json", file_hash="bfafd718b763782e994055a2d397834f", ) with open(path) as f: diff --git a/keras/src/datasets/mnist.py b/keras/src/datasets/mnist.py index b7e41cb78136..697801b92cdf 100644 --- 
a/keras/src/datasets/mnist.py
+++ b/keras/src/datasets/mnist.py
@@ -59,7 +59,7 @@ def load_data(path="mnist.npz"):
     )
     path = get_file(
         fname=path,
-        origin=origin_folder + "mnist.npz",
+        origin=f"{origin_folder}mnist.npz",
         file_hash=(  # noqa: E501
             "731c5ac602752760c8e48fbffcf8c3b850d9dc2a2aedcf2cc48468fc17b673d1"
         ),
diff --git a/keras/src/datasets/reuters.py b/keras/src/datasets/reuters.py
index 552b3997d441..b35a81859578 100644
--- a/keras/src/datasets/reuters.py
+++ b/keras/src/datasets/reuters.py
@@ -87,7 +87,7 @@ def load_data(
     )
     path = get_file(
         fname=path,
-        origin=origin_folder + "reuters.npz",
+        origin=f"{origin_folder}reuters.npz",
         file_hash=(  # noqa: E501
             "d6586e694ee56d7a4e65172e12b3e987c03096cb01eab99753921ef915959916"
         ),
@@ -156,7 +156,7 @@ def get_word_index(path="reuters_word_index.json"):
     )
     path = get_file(
         path,
-        origin=origin_folder + "reuters_word_index.json",
+        origin=f"{origin_folder}reuters_word_index.json",
         file_hash="4d44cc38712099c9e383dc6e5f11a921",
     )
     with open(path) as f:
diff --git a/keras/src/export/tf2onnx_lib.py b/keras/src/export/tf2onnx_lib.py
index 8dee72b2af49..975a524859a2 100644
--- a/keras/src/export/tf2onnx_lib.py
+++ b/keras/src/export/tf2onnx_lib.py
@@ -156,11 +156,10 @@ def prod(x):
                     > external_tensor_storage.external_tensor_size_threshold
                 ):
                     a = copy.deepcopy(a)
-                    tensor_name = (
-                        self.name.strip()
-                        + "_"
-                        + str(external_tensor_storage.name_counter)
-                    )
+                    tensor_name = (
+                        f"{self.name.strip()}"
+                        f"_{external_tensor_storage.name_counter}"
+                    )
                     for c in '~"#%&*:<>?/\\{|}':
                         tensor_name = tensor_name.replace(c, "_")
                     external_tensor_storage.name_counter += 1
diff --git a/keras/src/legacy/backend.py b/keras/src/legacy/backend.py
index 1c3876d85836..52068c4484af 100644
--- a/keras/src/legacy/backend.py
+++ b/keras/src/legacy/backend.py
@@ -68,11 +68,7 @@ def batch_dot(x, y, axes=None):
         raise ValueError(
             "Cannot do batch_dot on inputs "
             "with rank < 2. "
-            "Received inputs with tf.shapes "
-            + str(x_shape)
-            + " and "
-            + str(y_shape)
-            + "."
+            f"Received inputs with tf.shapes {x_shape} and {y_shape}."
         )
 
     x_batch_size = x_shape[0]
@@ -84,10 +80,7 @@ def batch_dot(x, y, axes=None):
             "Cannot do batch_dot on inputs "
             "with different batch sizes. "
             "Received inputs with tf.shapes "
-            + str(x_shape)
-            + " and "
-            + str(y_shape)
-            + "."
+            f"{x_shape} and {y_shape}."
         )
     if isinstance(axes, int):
         axes = [axes, axes]
@@ -101,9 +94,8 @@ def batch_dot(x, y, axes=None):
     if py_any(isinstance(a, (list, tuple)) for a in axes):
         raise ValueError(
             "Multiple target dimensions are not supported. "
-            + "Expected: None, int, (int, int), "
-            + "Provided: "
-            + str(axes)
+            "Expected: None, int, (int, int), "
+            f"Provided: {axes}"
         )
 
     # if tuple, convert to list.
@@ -130,12 +122,8 @@ def batch_dot(x, y, axes=None):
     if d1 is not None and d2 is not None and d1 != d2:
         raise ValueError(
             "Cannot do batch_dot on inputs with tf.shapes "
-            + str(x_shape)
-            + " and "
-            + str(y_shape)
-            + " with axes="
-            + str(axes)
-            + ". x.shape[%d] != y.shape[%d] (%d != %d)."
+            f"{x_shape} and {y_shape} with axes={axes}. "
+            "x.shape[%d] != y.shape[%d] (%d != %d)."
             % (axes[0], axes[1], d1, d2)
         )
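The last hunk above mixes an f-string with old-style % formatting. That works because adjacent string literals (f-strings included) are concatenated at parse time, before the % operator is applied, so the % operands format the combined message. A small sketch of the same pattern with made-up shape values:

    shape_a, shape_b = (2, 3), (4, 5)
    msg = (
        f"Cannot do batch_dot on inputs with tf.shapes {shape_a} "
        f"and {shape_b}. "
        "x.shape[%d] != y.shape[%d] (%d != %d)." % (0, 0, 2, 4)
    )
    assert msg.endswith("(2 != 4).")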
ndim(condition)=" - + str(cond_ndim) - + ", ndim(then_expression)=" - + str(expr_ndim) + f"{cond_ndim}, ndim(then_expression)={expr_ndim}" ) if cond_ndim > 1: ndim_diff = expr_ndim - cond_ndim diff --git a/keras/src/legacy/preprocessing/image.py b/keras/src/legacy/preprocessing/image.py index 4a0e8b44d395..5409ea87b425 100644 --- a/keras/src/legacy/preprocessing/image.py +++ b/keras/src/legacy/preprocessing/image.py @@ -617,17 +617,11 @@ def __init__( channels_axis = 3 if data_format == "channels_last" else 1 if self.x.shape[channels_axis] not in {1, 3, 4}: warnings.warn( - 'NumpyArrayIterator is set to use the data format convention "' - + data_format - + '" (channels on axis ' - + str(channels_axis) - + "), i.e. expected either 1, 3, or 4 channels on axis " - + str(channels_axis) - + ". However, it was passed an array with shape " - + str(self.x.shape) - + " (" - + str(self.x.shape[channels_axis]) - + " channels)." + f'NumpyArrayIterator is set to use the data format convention "{data_format}' + f'" (channels on axis {channels_axis})' + f", i.e. expected either 1, 3, or 4 channels on axis {channels_axis}. " + f"However, it was passed an array with shape {self.x.shape}" + f" ({self.x.shape[channels_axis]} channels)." ) if y is not None: self.y = np.asarray(y) @@ -1494,17 +1488,11 @@ def fit(self, x, augment=False, rounds=1, seed=None): if x.shape[self.channel_axis] not in {1, 3, 4}: warnings.warn( "Expected input to be images (as Numpy array) " - 'following the data format convention "' - + self.data_format - + '" (channels on axis ' - + str(self.channel_axis) - + "), i.e. expected either 1, 3 or 4 channels on axis " - + str(self.channel_axis) - + ". However, it was passed an array with shape " - + str(x.shape) - + " (" - + str(x.shape[self.channel_axis]) - + " channels)." + f'following the data format convention "{self.data_format}' + f'" (channels on axis {self.channel_axis})' + ", i.e. expected either 1, 3 or 4 channels on axis " + f"{self.channel_axis}. However, it was passed an array with shape " + f"{x.shape} ({x.shape[self.channel_axis]} channels)." 
diff --git a/keras/src/models/cloning_test.py b/keras/src/models/cloning_test.py
index b7e576798591..b370332c87e2 100644
--- a/keras/src/models/cloning_test.py
+++ b/keras/src/models/cloning_test.py
@@ -124,14 +124,14 @@ def test_cloning_correctness(self, model_fn, is_conv=False):
     def test_custom_clone_function(self, model_fn):
         def clone_function(layer):
             config = layer.get_config()
-            config["name"] = config["name"] + "_custom"
+            config["name"] = f"{config['name']}_custom"
             return layer.__class__.from_config(config)
 
         model = model_fn()
         new_model = clone_model(model, clone_function=clone_function)
         for l1, l2 in zip(model.layers, new_model.layers):
             if not isinstance(l1, layers.InputLayer):
-                self.assertEqual(l2.name, l1.name + "_custom")
+                self.assertEqual(l2.name, f"{l1.name}_custom")
 
     @parameterized.named_parameters(
         ("cnn_functional", get_cnn_functional_model),
diff --git a/keras/src/models/functional.py b/keras/src/models/functional.py
index ca165d65bb13..e47fb0ae27a7 100644
--- a/keras/src/models/functional.py
+++ b/keras/src/models/functional.py
@@ -779,7 +779,7 @@ def is_input_keras_tensor(x):
 
 def clone_single_keras_tensor(x):
     return backend.KerasTensor(
-        shape=x.shape, dtype=x.dtype, sparse=x.sparse, name=x.name + "_clone"
+        shape=x.shape, dtype=x.dtype, sparse=x.sparse, name=f"{x.name}_clone"
     )
 
 
@@ -842,7 +842,7 @@ def clone_graph_nodes(inputs, outputs):
             batch_shape=kt_input.shape,
             dtype=kt_input.dtype,
             sparse=kt_input.sparse,
-            name=kt_input.name + "CLONE",
+            name=f"{kt_input.name}CLONE",
         )
         cloned_inputs.append(cloned_input)
         kt_id_mapping[id(kt_input)] = cloned_input
diff --git a/keras/src/models/model.py b/keras/src/models/model.py
index d80a9ecada5b..377546367be6 100644
--- a/keras/src/models/model.py
+++ b/keras/src/models/model.py
@@ -842,9 +842,9 @@ def _flatten_nested_dict(self, nested_dict):
     def _flatten(current_dict, prefix=""):
         for key, value in current_dict.items():
             if isinstance(value, dict):
-                _flatten(value, prefix + key + "/")
+                _flatten(value, f"{prefix}{key}/")
             else:
-                flat_dict[prefix + key] = value
+                flat_dict[f"{prefix}{key}"] = value
 
     _flatten(nested_dict)
     return flat_dict
diff --git a/keras/src/ops/function.py b/keras/src/ops/function.py
index 18088cd3f5d9..fa0486ecf913 100644
--- a/keras/src/ops/function.py
+++ b/keras/src/ops/function.py
@@ -215,7 +215,7 @@ def _assert_input_compatibility(self, inputs):
 
 def make_node_key(op, node_index):
-    return str(id(op)) + "_ib-" + str(node_index)
+    return f"{id(op)}_ib-{node_index}"
 
 
 def map_graph(inputs, outputs):
diff --git a/keras/src/ops/numpy.py b/keras/src/ops/numpy.py
index 1b3a8a64f3b0..752459c5ad58 100644
--- a/keras/src/ops/numpy.py
+++ b/keras/src/ops/numpy.py
@@ -2759,7 +2759,7 @@ def compute_output_spec(self, *operands):
         kept_dims = sorted(kept_dims)
 
         if output_spec is None:
-            target_broadcast_spec = "..." + "".join(kept_dims)
+            target_broadcast_spec = f"...{''.join(kept_dims)}"
         else:
             target_broadcast_spec = output_spec
 
@@ -2781,18 +2781,18 @@ def compute_output_spec(self, *operands):
                 )
                 for size, s in zip(x_shape, split_spec[0]):
                     # Replace the letter with the right shape.
-                    expanded_shape = expanded_shape.replace(s, str(size) + " ")
+                    expanded_shape = expanded_shape.replace(s, f"{size} ")
                 expanded_shape = expanded_shape.replace("...", "")
             else:
                 # In this case, the input spec has "...", e.g., "i...j", "i...",
                 # or "...j".
                 for i in range(len(split_spec[0])):
                     expanded_shape = expanded_shape.replace(
-                        split_spec[0][i], str(x_shape[i]) + " "
+                        split_spec[0][i], f"{x_shape[i]} "
                     )
                 for i in range(len(split_spec[1])):
                     expanded_shape = expanded_shape.replace(
-                        split_spec[1][-i - 1], str(x_shape[-i - 1]) + " "
+                        split_spec[1][-i - 1], f"{x_shape[-i - 1]} "
                     )
                 # Shape matched by "..." will be inserted to the position of
                 # "...".
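The einsum hunk above also drops a redundant str() call: an f-string replacement field already formats the value through format()/str(), so f"{str(size)} " and f"{size} " are identical for ints. For example:

    size = 128
    assert f"{str(size)} " == f"{size} " == "128 "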
@@ -2806,7 +2806,7 @@ def compute_output_spec(self, *operands):
                     wildcard_shape_start_index:wildcard_shape_end_index
                 ]
                 wildcard_shape_str = (
-                    " ".join([str(size) for size in wildcard_shape]) + " "
+                    f"{' '.join([str(size) for size in wildcard_shape])} "
                 )
                 expanded_shape = expanded_shape.replace(
                     "...", wildcard_shape_str
diff --git a/keras/src/optimizers/base_optimizer.py b/keras/src/optimizers/base_optimizer.py
index a996e9945cc8..30f951904bb9 100644
--- a/keras/src/optimizers/base_optimizer.py
+++ b/keras/src/optimizers/base_optimizer.py
@@ -310,13 +310,14 @@ def add_variable_from_reference(
         """
         name = name or "var"
         if hasattr(reference_variable, "path"):
-            name = reference_variable.path.replace("/", "_") + "_" + name
+            name = f"{reference_variable.path.replace('/', '_')}_{name}"
         else:
-            name = (
-                str(reference_variable.name).replace("/", "_").replace(":", "_")
-                + "_"
-                + name
-            )
+            sanitized_ref_name = (
+                str(reference_variable.name)
+                .replace("/", "_")
+                .replace(":", "_")
+            )
+            name = f"{sanitized_ref_name}_{name}"
         return self.add_variable(
             shape=reference_variable.shape,
             initializer=initializer,
diff --git a/keras/src/saving/file_editor.py b/keras/src/saving/file_editor.py
index 620d7b111b4c..6bbc36131b8e 100644
--- a/keras/src/saving/file_editor.py
+++ b/keras/src/saving/file_editor.py
@@ -76,7 +76,7 @@ def __init__(
         if filepath.endswith(".keras"):
             zf = zipfile.ZipFile(filepath, "r")
             weights_store = H5IOStore(
-                saving_lib._VARS_FNAME + ".h5",
+                f"{saving_lib._VARS_FNAME}.h5",
                 archive=zf,
                 mode="r",
             )
@@ -143,7 +143,7 @@ def _compare(
     ):
         base_inner_path = inner_path
         for ref_key, ref_val in ref_spec.items():
-            inner_path = base_inner_path + "/" + ref_key
+            inner_path = f"{base_inner_path}/{ref_key}"
             if inner_path in checked_paths:
                 continue
 
@@ -435,7 +435,7 @@ def _save(weights_dict, weights_store, inner_path):
                 _save(
                     weights_dict[name],
                     weights_store,
-                    inner_path=inner_path + "/" + name,
+                    inner_path=f"{inner_path}/{name}",
                 )
             else:
                 # e.g. name="0", value=HDF5Dataset
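For the optimizer hunk above, the derived slot-variable name is just the sanitized reference path plus a suffix. A short sketch of the naming rule (the class below is a stand-in; only the string logic matters):

    class Ref:  # hypothetical stand-in for a Keras variable
        path = "model/dense/kernel"

    name = "momentum"
    slot_name = f"{Ref.path.replace('/', '_')}_{name}"
    assert slot_name == "model_dense_kernel_momentum"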
name="0", value=HDF5Dataset @@ -462,7 +463,7 @@ def _extract_weights_from_store(self, data, metadata=None, inner_path=""): result = collections.OrderedDict() for key in data.keys(): - inner_path = inner_path + "/" + key + inner_path = f"{inner_path}/{key}" value = data[key] if isinstance(value, h5py.Group): if len(value) == 0: @@ -506,7 +507,7 @@ def _print_weights_structure( self, weights_dict, indent=0, is_first=True, prefix="", inner_path="" ): for idx, (key, value) in enumerate(weights_dict.items()): - inner_path = inner_path + "/" + key + inner_path = os.path.join(inner_path, key) is_last = idx == len(weights_dict) - 1 if is_first: is_first = False @@ -558,27 +559,26 @@ def _generate_html_weights(dictionary, margin_left=0, font_size=1): if isinstance(value, dict) and value: html += ( f'
<details style="margin-left: {margin_left}px;">' - + '<summary style="' - + f"font-size: {font_size}em; " - + "font-weight: bold;" - + f'">{key}</summary>' - + _generate_html_weights( + '<summary style="' + f"font-size: {font_size}em; " + "font-weight: bold;" + f'">{key}</summary>' + f"{_generate_html_weights( value, margin_left + 20, font_size - 1 - ) - + "</details>" + )}" ) else: html += ( f'<div style="margin-left: {margin_left}px;">' - + f'<span style="font-size: {font_size}em;">' - + f"{key} : shape={value.shape}" - + f", dtype={value.dtype}" - + f"<div style='margin-left: 20px;'>" + f'<span style="font-size: {font_size}em;">' + f"{key} : shape={value.shape}" + f", dtype={value.dtype}" + f"<div style='margin-left: 20px;'>" - + f"{display_weight(value)}" - + "</div>" - + "</div>" + f"{display_weight(value)}" + "</div>" + "</div>" ) return html
diff --git a/keras/src/saving/object_registration.py b/keras/src/saving/object_registration.py index 8c0f538917bd..2b1ac1df803d 100644 --- a/keras/src/saving/object_registration.py +++ b/keras/src/saving/object_registration.py @@ -140,7 +140,7 @@ class MyDense(keras.layers.Dense): def decorator(arg): """Registers a class with the Keras serialization framework.""" class_name = name if name is not None else arg.__name__ - registered_name = package + ">" + class_name + registered_name = f"{package}>{class_name}" if inspect.isclass(arg) and not hasattr(arg, "get_config"): raise ValueError(
diff --git a/keras/src/saving/saving_lib.py b/keras/src/saving/saving_lib.py index 72492cb4532c..fac57727b877 100644 --- a/keras/src/saving/saving_lib.py +++ b/keras/src/saving/saving_lib.py @@ -51,8 +51,8 @@ _CONFIG_FILENAME = "config.json" _METADATA_FILENAME = "metadata.json" _VARS_FNAME = "model.weights" # Will become e.g. "model.weights.h5" -_VARS_FNAME_H5 = _VARS_FNAME + ".h5" -_VARS_FNAME_NPZ = _VARS_FNAME + ".npz" +_VARS_FNAME_H5 = f"{_VARS_FNAME}.h5" +_VARS_FNAME_NPZ = f"{_VARS_FNAME}.npz" _ASSETS_DIRNAME = "assets" _MEMORY_UPPER_BOUND = 0.5 # 50% @@ -1260,7 +1260,7 @@ def get(self, path): # If not found, check shard map and switch files. weight_map = self.sharding_config["weight_map"] filename = weight_map.get(parsed_path) or weight_map.get( - "/" + parsed_path + "/vars" + f"/{parsed_path}/vars" ) if filename is not None and filename != self.current_shard_path.name:
diff --git a/keras/src/saving/serialization_lib.py b/keras/src/saving/serialization_lib.py index e01d22d52728..f743aa649367 100644 --- a/keras/src/saving/serialization_lib.py +++ b/keras/src/saving/serialization_lib.py @@ -763,7 +763,7 @@ def _retrieve_class_or_fn( # module name might not match the package structure # (e.g. experimental symbols). if module == "keras" or module.startswith("keras."): - api_name = module + "." + name + api_name = f"{module}.{name}" obj = api_export.get_symbol_from_name(api_name) if obj is not None: @@ -776,7 +776,7 @@ def _retrieve_class_or_fn( if obj_type == "function" and module == "builtins": for mod in BUILTIN_MODULES: obj = api_export.get_symbol_from_name( - "keras." + mod + "." + name + f"keras.{mod}.{name}" ) if obj is not None: return obj
diff --git a/keras/src/trainers/compile_utils.py b/keras/src/trainers/compile_utils.py index 62115f2f46e3..eac116159b3b 100644 --- a/keras/src/trainers/compile_utils.py +++ b/keras/src/trainers/compile_utils.py @@ -659,7 +659,7 @@ def __init__(self, loss, weight): # Add `Mean` metric to the tracker for each loss.
if len(self._flat_losses) > 1: for _loss in self._flat_losses: - name = _loss.name + "_loss" + name = f"{_loss.name}_loss" self._tracker.add_to_store( "metrics", metrics_module.Mean(name=name) ) diff --git a/keras/src/utils/file_utils.py b/keras/src/utils/file_utils.py index 837d29531662..6f89a96dab97 100644 --- a/keras/src/utils/file_utils.py +++ b/keras/src/utils/file_utils.py @@ -3,9 +3,13 @@ import re import shutil import tarfile +import tempfile import urllib +import urllib.error +import urllib.parse import warnings import zipfile +from tempfile import tempdir from urllib.request import urlretrieve from keras.src.api_export import keras_export @@ -159,7 +163,7 @@ def get_file( ```python path_to_downloaded_file = get_file( origin="https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz", - extract=True, + extract=True ) ``` @@ -221,7 +225,9 @@ def get_file( hash_algorithm = "md5" datadir_base = os.path.expanduser(cache_dir) if not os.access(datadir_base, os.W_OK): - datadir_base = os.path.join("/tmp", ".keras") + datadir_base = os.path.join( + "/tmp" if os.path.isdir("/tmp") else tempfile.gettempdir(), ".keras" + ) datadir = os.path.join(datadir_base, cache_subdir) os.makedirs(datadir, exist_ok=True) @@ -249,13 +255,13 @@ def get_file( if "." in fname: download_target = os.path.join(datadir, fname) fname = fname[: fname.find(".")] - extraction_dir = os.path.join(datadir, fname + "_extracted") + extraction_dir = os.path.join(datadir, f"{fname}_extracted") else: extraction_dir = os.path.join(datadir, fname) - download_target = os.path.join(datadir, fname + "_archive") + download_target = os.path.join(datadir, f"{fname}_archive") else: extraction_dir = os.path.join(datadir, fname) - download_target = os.path.join(datadir, fname + "_archive") + download_target = os.path.join(datadir, f"{fname}_archive") else: download_target = os.path.join(datadir, fname) @@ -520,3 +526,4 @@ def makedirs(path): else: _raise_if_no_gfile(path) return os.makedirs(path) +"/fo" \ No newline at end of file diff --git a/keras/src/utils/file_utils_test.py b/keras/src/utils/file_utils_test.py index 04a092a8d038..4f39991f7c66 100644 --- a/keras/src/utils/file_utils_test.py +++ b/keras/src/utils/file_utils_test.py @@ -1,36 +1,38 @@ import hashlib import os -import pathlib import shutil import tarfile import tempfile import urllib +import urllib.parse +import urllib.request import zipfile from unittest.mock import patch + from keras.src.testing import test_case from keras.src.utils import file_utils class PathToStringTest(test_case.TestCase): def test_path_to_string_with_string_path(self): - path = "/path/to/file.txt" + path = os.path.join(os.path.sep, "path", "to", "file.txt") string_path = file_utils.path_to_string(path) self.assertEqual(string_path, path) def test_path_to_string_with_PathLike_object(self): - path = pathlib.Path("/path/to/file.txt") + path = os.path.join(os.path.sep, "path", "to", "file.txt") string_path = file_utils.path_to_string(path) self.assertEqual(string_path, str(path)) def test_path_to_string_with_non_string_typed_path_object(self): class NonStringTypedPathObject: def __fspath__(self): - return "/path/to/file.txt" + return os.path.join(os.path.sep, "path", "to", "file.txt") path = NonStringTypedPathObject() string_path = file_utils.path_to_string(path) - self.assertEqual(string_path, "/path/to/file.txt") + self.assertEqual(string_path, os.path.join(os.path.sep, "path", "to", "file.txt")) def test_path_to_string_with_none_path(self): string_path = 
file_utils.path_to_string(None) @@ -39,27 +41,27 @@ def test_path_to_string_with_none_path(self): class ResolvePathTest(test_case.TestCase): def test_resolve_path_with_absolute_path(self): - path = "/path/to/file.txt" + path = os.path.join(os.path.sep, "path", "to", "file.txt") resolved_path = file_utils.resolve_path(path) self.assertEqual(resolved_path, os.path.realpath(os.path.abspath(path))) def test_resolve_path_with_relative_path(self): - path = "./file.txt" + path = os.path.join(".", "file.txt") resolved_path = file_utils.resolve_path(path) self.assertEqual(resolved_path, os.path.realpath(os.path.abspath(path))) class IsPathInDirTest(test_case.TestCase): def test_is_path_in_dir_with_absolute_paths(self): - base_dir = "/path/to/base_dir" - path = "/path/to/base_dir/file.txt" + base_dir = os.path.join(os.path.sep, "path", "to", "base_dir") + path = os.path.join(base_dir, "file.txt") self.assertTrue(file_utils.is_path_in_dir(path, base_dir)) class IsLinkInDirTest(test_case.TestCase): def setUp(self): self._cleanup(os.path.join("test_path", "to", "base_dir")) - self._cleanup("./base_dir") + self._cleanup(os.path.join(".", "base_dir")) def _cleanup(self, base_dir): if os.path.exists(base_dir): @@ -93,7 +95,7 @@ def test_is_link_in_dir_with_absolute_paths(self): self.assertTrue(file_utils.is_link_in_dir(info, base_dir)) def test_is_link_in_dir_with_relative_paths(self): - base_dir = "./base_dir" + base_dir = os.path.join(".", "base_dir") link_path = os.path.join(base_dir, "symlink") target_path = os.path.join(base_dir, "file.txt") @@ -121,7 +123,7 @@ def test_is_link_in_dir_with_relative_paths(self): def tearDown(self): self._cleanup(os.path.join("test_path", "to", "base_dir")) - self._cleanup("./base_dir") + self._cleanup(os.path.join(".", "base_dir")) class FilterSafePathsTest(test_case.TestCase): @@ -486,10 +488,7 @@ def _test_file_extraction_and_validation( hashval_md5 = file_utils.hash_file(file_path, algorithm="md5") - if archive_type: - extract = True - else: - extract = False + extract = bool(archive_type) path = file_utils.get_file( "test", @@ -499,7 +498,7 @@ def _test_file_extraction_and_validation( cache_subdir=dest_dir, ) if extract: - fpath = path + "_archive" + fpath = f"{path}_archive" else: fpath = path diff --git a/keras/src/utils/io_utils.py b/keras/src/utils/io_utils.py index f087ab6dd21a..09e427fbd1c4 100644 --- a/keras/src/utils/io_utils.py +++ b/keras/src/utils/io_utils.py @@ -93,7 +93,7 @@ def print_msg(message, line_break=True): """Print the message to absl logging or stdout.""" message = str(message) if is_interactive_logging_enabled(): - message = message + "\n" if line_break else message + message = f"{message}\n" if line_break else message try: sys.stdout.write(message) except UnicodeEncodeError: diff --git a/keras/src/utils/jax_layer_test.py b/keras/src/utils/jax_layer_test.py index 96c74809d13d..009ecd402e5f 100644 --- a/keras/src/utils/jax_layer_test.py +++ b/keras/src/utils/jax_layer_test.py @@ -225,7 +225,7 @@ def verify_weights_and_params(layer): inputs1 = layers.Input(shape=input_shape) outputs1 = layer1(inputs1) model1 = models.Model( - inputs=inputs1, outputs=outputs1, name=model_name + "1" + inputs=inputs1, outputs=outputs1, name=f"{model_name}1" ) model1.summary() @@ -299,7 +299,7 @@ def verify_identical_model(model): input_shape=input_shape, **layer_init_kwargs, ) - model2 = models.Sequential([layer2], name=model_name + "2") + model2 = models.Sequential([layer2], name=f"{model_name}2") model2.summary() verify_weights_and_params(layer2) 
model2.compile(
diff --git a/keras/src/utils/model_visualization.py b/keras/src/utils/model_visualization.py index 83b49f7aaf7c..fb5ec22ceaa4 100644 --- a/keras/src/utils/model_visualization.py +++ b/keras/src/utils/model_visualization.py @@ -178,7 +178,7 @@ def format_shape(shape): colspan = 1 if cols: - table += "<tr>" + "".join(cols) + "</tr>" + table += f"<tr>{''.join(cols)}</tr>" table += "</table>>" return table
diff --git a/keras/src/utils/naming_test.py b/keras/src/utils/naming_test.py index 00e3f6bdda30..25adc45885d5 100644 --- a/keras/src/utils/naming_test.py +++ b/keras/src/utils/naming_test.py @@ -22,7 +22,7 @@ def test_uniquify_non_unique_name(self): name = "non_unique_name" naming.uniquify(name) unique_name = naming.uniquify(name) - self.assertEqual(unique_name, name + "_1") + self.assertEqual(unique_name, f"{name}_1") def test_to_snake_case_snake_case_name(self): name = "snake_case_name"
diff --git a/keras/src/utils/progbar.py b/keras/src/utils/progbar.py index 8a521eb654b4..cf3161250bff 100644 --- a/keras/src/utils/progbar.py +++ b/keras/src/utils/progbar.py @@ -120,16 +120,16 @@ def update(self, current, values=None, finalize=None): if self.target is not None: numdigits = int(math.log10(self.target)) + 1 - bar = ("%" + str(numdigits) + "d/%d") % (current, self.target) + bar = (f"%{numdigits}d/%d") % (current, self.target) bar = f"\x1b[1m{bar}\x1b[0m " special_char_len += 8 prog = float(current) / self.target prog_width = int(self.width * prog) if prog_width > 0: - bar += "\33[32m" + "━" * prog_width + "\x1b[0m" + bar += f"\33[32m{'━' * prog_width}\x1b[0m" special_char_len += 9 - bar += "\33[37m" + "━" * (self.width - prog_width) + "\x1b[0m" + bar += f"\33[37m{'━' * (self.width - prog_width)}\x1b[0m" special_char_len += 9 else: @@ -189,7 +189,7 @@ def update(self, current, values=None, finalize=None): elif self.verbose == 2: if finalize: numdigits = int(math.log10(self.target)) + 1 - count = ("%" + str(numdigits) + "d/%d") % (current, self.target) + count = f"%{numdigits}d/%d" % (current, self.target) info = f"{count} - {now - self._start:.0f}s" info += " -" + self._format_time(time_per_unit, self.unit_name) for k in self._values_order:
diff --git a/keras/src/utils/summary_utils.py b/keras/src/utils/summary_utils.py index 2e67b5c2f841..57866d223264 100644 --- a/keras/src/utils/summary_utils.py +++ b/keras/src/utils/summary_utils.py @@ -87,7 +87,7 @@ def format_layer_shape(layer): def format_shape(shape): highlighted = [highlight_number(x) for x in shape] - return "(" + ", ".join(highlighted) + ")" + return f"({', '.join(highlighted)})" # There are 2 approaches to get output shapes: # 1.
Using `layer._inbound_nodes`, which is possible if the model is a @@ -276,7 +276,7 @@ def get_layer_fields(layer, prefix=""): if not hasattr(layer, "built"): params = highlight_number(0) elif not layer.built: - params = highlight_number(0) + " (unbuilt)" + params = f"{highlight_number(0)} (unbuilt)" else: params = highlight_number(f"{layer.count_params():,}") @@ -296,7 +296,7 @@ def get_layer_fields(layer, prefix=""): def print_layer(layer, nested_level=0): if nested_level: - prefix = " " * nested_level + "└" + " " + prefix = " " * nested_level + "└ " else: prefix = "" @@ -388,6 +388,17 @@ def print_layer(layer, nested_level=0): else: print_fn(console.end_capture()) +def print(*args, **kwargs): + from keras.api import backend + import jax + import tensorflow as tf + + backend = backend.backend() + print_fn = {"jax": jax.debug.print, + "tensorflow": tf.print}[backend] + # "pytorch" https://pytorch.org/docs/stable/generated/torch.set_printoptions.html ? + # "openvino" + return print_fn(*args, **kwargs) def get_layer_index_bound_by_layer_name(layers, layer_range=None): """Get the layer indexes from the model based on layer names. From 1c06f928bf9a2039288980be3d16f20e1779fff1 Mon Sep 17 00:00:00 2001 From: Samuel Marks <807580+SamuelMarks@users.noreply.github.com> Date: Mon, 2 Jun 2025 13:25:17 -0600 Subject: [PATCH 2/5] [*.py] Use f-strings and `os.path.join` throughout ; `ruff format` --- api_gen.py | 5 +++-- guides/distributed_training_with_tensorflow.py | 3 ++- guides/training_with_built_in_methods.py | 6 ++++-- integration_tests/import_test.py | 8 +++++--- keras/src/applications/densenet.py | 12 +++--------- keras/src/applications/mobilenet_v2.py | 8 ++------ keras/src/callbacks/backup_and_restore.py | 4 +++- keras/src/callbacks/swap_ema_weights_test.py | 12 +++++++++--- keras/src/export/tf2onnx_lib.py | 4 +++- keras/src/optimizers/base_optimizer.py | 6 +++--- keras/src/saving/file_editor.py | 5 ++--- keras/src/saving/serialization_lib.py | 4 +--- keras/src/utils/file_utils.py | 4 +++- keras/src/utils/file_utils_test.py | 4 +++- keras/src/utils/summary_utils.py | 7 ++++--- 15 files changed, 50 insertions(+), 42 deletions(-) diff --git a/api_gen.py b/api_gen.py index 89c0be2c67e0..daa4e9f2d579 100644 --- a/api_gen.py +++ b/api_gen.py @@ -86,10 +86,11 @@ def create_legacy_directory(package_dir): legacy_fpath = os.path.join(root, fname) tf_keras_root = root.replace( os.path.join(os.path.sep, "_legacy"), - os.path.join(os.path.sep, "_tf_keras", "keras") + os.path.join(os.path.sep, "_tf_keras", "keras"), ) core_api_fpath = os.path.join( - root.replace(os.path.join(os.path.sep, "_legacy"), ""), fname + root.replace(os.path.join(os.path.sep, "_legacy"), ""), + fname, ) if not os.path.exists(tf_keras_root): os.makedirs(tf_keras_root) diff --git a/guides/distributed_training_with_tensorflow.py b/guides/distributed_training_with_tensorflow.py index 7c0e9b556532..0207eed0f1dd 100644 --- a/guides/distributed_training_with_tensorflow.py +++ b/guides/distributed_training_with_tensorflow.py @@ -194,7 +194,8 @@ def make_or_restore_model(): # Either restore the latest model, or create a fresh one # if there is no checkpoint available. 
checkpoints = [ - os.path.join(checkpoint_dir, name) for name in os.listdir(checkpoint_dir) + os.path.join(checkpoint_dir, name) + for name in os.listdir(checkpoint_dir) ] if checkpoints: latest_checkpoint = max(checkpoints, key=os.path.getctime) diff --git a/guides/training_with_built_in_methods.py b/guides/training_with_built_in_methods.py index a3dea4b32d87..49a9dad1d8a9 100644 --- a/guides/training_with_built_in_methods.py +++ b/guides/training_with_built_in_methods.py @@ -1133,7 +1133,8 @@ def make_or_restore_model(): # Either restore the latest model, or create a fresh one # if there is no checkpoint available. checkpoints = [ - os.path.join(checkpoint_dir, name) for name in os.listdir(checkpoint_dir) + os.path.join(checkpoint_dir, name) + for name in os.listdir(checkpoint_dir) ] if checkpoints: latest_checkpoint = max(checkpoints, key=os.path.getctime) @@ -1148,7 +1149,8 @@ def make_or_restore_model(): # This callback saves the model every 100 batches. # We include the training loss in the saved model name. keras.callbacks.ModelCheckpoint( - filepath=os.path.join(checkpoint_dir, "model-loss={loss:.2f}.keras"), save_freq=100 + filepath=os.path.join(checkpoint_dir, "model-loss={loss:.2f}.keras"), + save_freq=100, ) ] model.fit(x_train, y_train, epochs=1, callbacks=callbacks) diff --git a/integration_tests/import_test.py b/integration_tests/import_test.py index a27f0b1de5e5..7cbea0c29fbf 100644 --- a/integration_tests/import_test.py +++ b/integration_tests/import_test.py @@ -56,9 +56,11 @@ def manage_venv_installs(whl_path): "pip install -r requirements-common.txt", "pip install pytest", # Ensure other backends are uninstalled - "pip uninstall -y {0} {1} {2}".format(BACKEND_REQ[other_backends[0]][0], - BACKEND_REQ[other_backends[1]][0], - BACKEND_REQ[other_backends[2]][0]), + "pip uninstall -y {0} {1} {2}".format( + BACKEND_REQ[other_backends[0]][0], + BACKEND_REQ[other_backends[1]][0], + BACKEND_REQ[other_backends[2]][0], + ), # Install `.whl` package f"pip install {whl_path}", ] diff --git a/keras/src/applications/densenet.py b/keras/src/applications/densenet.py index 476256ed3c47..001d28b92daa 100644 --- a/keras/src/applications/densenet.py +++ b/keras/src/applications/densenet.py @@ -12,21 +12,15 @@ DENSENET121_WEIGHT_PATH = ( f"{BASE_WEIGHTS_PATH}densenet121_weights_tf_dim_ordering_tf_kernels.h5" ) -DENSENET121_WEIGHT_PATH_NO_TOP = ( - f"{BASE_WEIGHTS_PATH}densenet121_weights_tf_dim_ordering_tf_kernels_notop.h5" -) +DENSENET121_WEIGHT_PATH_NO_TOP = f"{BASE_WEIGHTS_PATH}densenet121_weights_tf_dim_ordering_tf_kernels_notop.h5" DENSENET169_WEIGHT_PATH = ( f"{BASE_WEIGHTS_PATH}densenet169_weights_tf_dim_ordering_tf_kernels.h5" ) -DENSENET169_WEIGHT_PATH_NO_TOP = ( - f"{BASE_WEIGHTS_PATH}densenet169_weights_tf_dim_ordering_tf_kernels_notop.h5" -) +DENSENET169_WEIGHT_PATH_NO_TOP = f"{BASE_WEIGHTS_PATH}densenet169_weights_tf_dim_ordering_tf_kernels_notop.h5" DENSENET201_WEIGHT_PATH = ( f"{BASE_WEIGHTS_PATH}densenet201_weights_tf_dim_ordering_tf_kernels.h5" ) -DENSENET201_WEIGHT_PATH_NO_TOP = ( - f"{BASE_WEIGHTS_PATH}densenet201_weights_tf_dim_ordering_tf_kernels_notop.h5" -) +DENSENET201_WEIGHT_PATH_NO_TOP = f"{BASE_WEIGHTS_PATH}densenet201_weights_tf_dim_ordering_tf_kernels_notop.h5" def dense_block(x, blocks, name): diff --git a/keras/src/applications/mobilenet_v2.py b/keras/src/applications/mobilenet_v2.py index b97aada53754..b0d61068fca9 100644 --- a/keras/src/applications/mobilenet_v2.py +++ b/keras/src/applications/mobilenet_v2.py @@ -368,17 +368,13 @@ def MobileNetV2( # Load 
weights. if weights == "imagenet": if include_top: - model_name = ( - f"mobilenet_v2_weights_tf_dim_ordering_tf_kernels_{float(alpha)}_{rows}.h5" - ) + model_name = f"mobilenet_v2_weights_tf_dim_ordering_tf_kernels_{float(alpha)}_{rows}.h5" weight_path = BASE_WEIGHT_PATH + model_name weights_path = file_utils.get_file( model_name, weight_path, cache_subdir="models" ) else: - model_name = ( - f"mobilenet_v2_weights_tf_dim_ordering_tf_kernels_{float(alpha)}_{rows}_no_top.h5" - ) + model_name = f"mobilenet_v2_weights_tf_dim_ordering_tf_kernels_{float(alpha)}_{rows}_no_top.h5" weight_path = BASE_WEIGHT_PATH + model_name weights_path = file_utils.get_file( model_name, weight_path, cache_subdir="models" diff --git a/keras/src/callbacks/backup_and_restore.py b/keras/src/callbacks/backup_and_restore.py index 6e3add89eb6e..55053cc43640 100644 --- a/keras/src/callbacks/backup_and_restore.py +++ b/keras/src/callbacks/backup_and_restore.py @@ -100,7 +100,9 @@ def __init__( backup_dir, "training_metadata.json" ) self._prev_weights_path = f"{self._weights_path}.bkp" - self._prev_training_metadata_path = f"{self._training_metadata_path}.bkp" + self._prev_training_metadata_path = ( + f"{self._training_metadata_path}.bkp" + ) if save_freq != "epoch" and not isinstance(save_freq, int): raise ValueError( "Invalid value for argument `save_freq`. " diff --git a/keras/src/callbacks/swap_ema_weights_test.py b/keras/src/callbacks/swap_ema_weights_test.py index 63149fd3b3ce..795f1452a189 100644 --- a/keras/src/callbacks/swap_ema_weights_test.py +++ b/keras/src/callbacks/swap_ema_weights_test.py @@ -108,7 +108,9 @@ def test_swap_ema_weights_on_epoch(self): epochs=2, callbacks=[ callbacks.SwapEMAWeights(swap_on_epoch=True), - callbacks.ModelCheckpoint(os.path.join(temp_dir, "{epoch:1d}.keras")), + callbacks.ModelCheckpoint( + os.path.join(temp_dir, "{epoch:1d}.keras") + ), ], validation_data=(self.x_train, self.y_train), ) @@ -167,12 +169,16 @@ def test_swap_ema_weights_with_tf_distribute(self): callbacks=[ callbacks.SwapEMAWeights(swap_on_epoch=True), callbacks.ModelCheckpoint( - os.path.join(temp_dir, "distributed_{epoch:1d}.keras") + os.path.join( + temp_dir, "distributed_{epoch:1d}.keras" + ) ), ], validation_data=(self.x_train, self.y_train), ) - model2 = saving.load_model(os.path.join(temp_dir, "distributed_2.keras")) + model2 = saving.load_model( + os.path.join(temp_dir, "distributed_2.keras") + ) logs = model.evaluate(self.x_train, self.y_train, return_dict=True) logs2 = model2.evaluate(self.x_train, self.y_train, return_dict=True) # saved checkpoint will be applied by EMA weights diff --git a/keras/src/export/tf2onnx_lib.py b/keras/src/export/tf2onnx_lib.py index 975a524859a2..b6ff3dfe37ae 100644 --- a/keras/src/export/tf2onnx_lib.py +++ b/keras/src/export/tf2onnx_lib.py @@ -156,7 +156,9 @@ def prod(x): > external_tensor_storage.external_tensor_size_threshold ): a = copy.deepcopy(a) - tensor_name = f"{self.name.strip()}_{external_tensor_storage.name_counter}" + tensor_name = ( + f"{self.name.strip()}_{external_tensor_storage.name_counter}" + ) for c in '~"#%&*:<>?/\\{|}': tensor_name = tensor_name.replace(c, "_") external_tensor_storage.name_counter += 1 diff --git a/keras/src/optimizers/base_optimizer.py b/keras/src/optimizers/base_optimizer.py index 30f951904bb9..4356b1f5131b 100644 --- a/keras/src/optimizers/base_optimizer.py +++ b/keras/src/optimizers/base_optimizer.py @@ -312,9 +312,9 @@ def add_variable_from_reference( if hasattr(reference_variable, "path"): name = 
f"{reference_variable.path.replace('/', '_')}_{name}" else: - sanitised_ref_name = (str(reference_variable.name) - .replace('/', '_') - .replace(':', '_')) + sanitised_ref_name = ( + str(reference_variable.name).replace("/", "_").replace(":", "_") + ) name = f"{sanitised_ref_name}_{name}" return self.add_variable( shape=reference_variable.shape, diff --git a/keras/src/saving/file_editor.py b/keras/src/saving/file_editor.py index 6bbc36131b8e..dcd416ce8bd5 100644 --- a/keras/src/saving/file_editor.py +++ b/keras/src/saving/file_editor.py @@ -563,9 +563,8 @@ def _generate_html_weights(dictionary, margin_left=0, font_size=1): f"font-size: {font_size}em; " "font-weight: bold;" f'">{key}' - f"{_generate_html_weights( - value, margin_left + 20, font_size - 1 - )}" + f"{_generate_html_weights(value, margin_left + 20, font_size - 1)}" + "" ) else: html += ( diff --git a/keras/src/saving/serialization_lib.py b/keras/src/saving/serialization_lib.py index f743aa649367..b9a1c39250de 100644 --- a/keras/src/saving/serialization_lib.py +++ b/keras/src/saving/serialization_lib.py @@ -775,9 +775,7 @@ def _retrieve_class_or_fn( # the corresponding function from the identifying string. if obj_type == "function" and module == "builtins": for mod in BUILTIN_MODULES: - obj = api_export.get_symbol_from_name( - f"keras.{mod}.{name}" - ) + obj = api_export.get_symbol_from_name(f"keras.{mod}.{name}") if obj is not None: return obj diff --git a/keras/src/utils/file_utils.py b/keras/src/utils/file_utils.py index 6f89a96dab97..71c04d0d39c0 100644 --- a/keras/src/utils/file_utils.py +++ b/keras/src/utils/file_utils.py @@ -526,4 +526,6 @@ def makedirs(path): else: _raise_if_no_gfile(path) return os.makedirs(path) -"/fo" \ No newline at end of file + + +"/fo" diff --git a/keras/src/utils/file_utils_test.py b/keras/src/utils/file_utils_test.py index 4f39991f7c66..03a169738037 100644 --- a/keras/src/utils/file_utils_test.py +++ b/keras/src/utils/file_utils_test.py @@ -32,7 +32,9 @@ def __fspath__(self): path = NonStringTypedPathObject() string_path = file_utils.path_to_string(path) - self.assertEqual(string_path, os.path.join(os.path.sep, "path", "to", "file.txt")) + self.assertEqual( + string_path, os.path.join(os.path.sep, "path", "to", "file.txt") + ) def test_path_to_string_with_none_path(self): string_path = file_utils.path_to_string(None) diff --git a/keras/src/utils/summary_utils.py b/keras/src/utils/summary_utils.py index 57866d223264..96f0a48d2c11 100644 --- a/keras/src/utils/summary_utils.py +++ b/keras/src/utils/summary_utils.py @@ -268,7 +268,7 @@ def get_connections(layer): def get_layer_fields(layer, prefix=""): output_shape = format_layer_shape(layer) - name = prefix + layer.name + name = f"{prefix}{layer.name}" cls_name = layer.__class__.__name__ name = rich.markup.escape(name) name += f" ({highlight_symbol(rich.markup.escape(cls_name))})" @@ -388,18 +388,19 @@ def print_layer(layer, nested_level=0): else: print_fn(console.end_capture()) + def print(*args, **kwargs): from keras.api import backend import jax import tensorflow as tf backend = backend.backend() - print_fn = {"jax": jax.debug.print, - "tensorflow": tf.print}[backend] + print_fn = {"jax": jax.debug.print, "tensorflow": tf.print}[backend] # "pytorch" https://pytorch.org/docs/stable/generated/torch.set_printoptions.html ? # "openvino" return print_fn(*args, **kwargs) + def get_layer_index_bound_by_layer_name(layers, layer_range=None): """Get the layer indexes from the model based on layer names. 
From 791273461b82da5b65c07ad27c5cc1476d467632 Mon Sep 17 00:00:00 2001 From: Samuel Marks <807580+SamuelMarks@users.noreply.github.com> Date: Mon, 2 Jun 2025 13:31:38 -0600 Subject: [PATCH 3/5] [keras/src/backend/common/variables.py] Add missing `import os.path` ; [keras/src/ops/ops_test.py] Use f-string over concatenation --- keras/src/backend/common/variables.py | 2 ++ keras/src/ops/ops_test.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/keras/src/backend/common/variables.py b/keras/src/backend/common/variables.py index 36e5305d6faf..96f7f8af6f8c 100644 --- a/keras/src/backend/common/variables.py +++ b/keras/src/backend/common/variables.py @@ -1,3 +1,5 @@ +import os.path + import numpy as np from keras.src import backend diff --git a/keras/src/ops/ops_test.py b/keras/src/ops/ops_test.py index 57c1c6fd11e3..fe5444e86ead 100644 --- a/keras/src/ops/ops_test.py +++ b/keras/src/ops/ops_test.py @@ -170,7 +170,7 @@ def test_backend_consistency(self, module_name): for op_function, _ in op_functions_and_classes(ops_module): name = op_function.__name__ - if hasattr(ops_module, "_" + name): + if hasattr(ops_module, f"_{name}"): # For an op function `foo`, if there is a function named `_foo`, # that means we have a backend independent implementation. continue From 58b637e8fe8cc42ab50b9384eda88d4fbb936450 Mon Sep 17 00:00:00 2001 From: Samuel Marks <807580+SamuelMarks@users.noreply.github.com> Date: Mon, 2 Jun 2025 13:41:12 -0600 Subject: [PATCH 4/5] [*] Run `pre-commit run --all-files` and resolve found issues --- keras/src/applications/densenet.py | 15 ++++++++++++--- keras/src/applications/mobilenet_v2.py | 10 ++++++++-- keras/src/applications/nasnet.py | 5 +++-- keras/src/legacy/preprocessing/image.py | 11 ++++++----- keras/src/saving/file_editor.py | 5 ++++- keras/src/utils/file_utils.py | 1 - keras/src/utils/file_utils_test.py | 1 - keras/src/utils/summary_utils.py | 6 ++++-- 8 files changed, 37 insertions(+), 17 deletions(-) diff --git a/keras/src/applications/densenet.py b/keras/src/applications/densenet.py index 001d28b92daa..22c01c15ff3c 100644 --- a/keras/src/applications/densenet.py +++ b/keras/src/applications/densenet.py @@ -12,15 +12,24 @@ DENSENET121_WEIGHT_PATH = ( f"{BASE_WEIGHTS_PATH}densenet121_weights_tf_dim_ordering_tf_kernels.h5" ) -DENSENET121_WEIGHT_PATH_NO_TOP = f"{BASE_WEIGHTS_PATH}densenet121_weights_tf_dim_ordering_tf_kernels_notop.h5" +DENSENET121_WEIGHT_PATH_NO_TOP = ( + f"{BASE_WEIGHTS_PATH}" + "densenet121_weights_tf_dim_ordering_tf_kernels_notop.h5" +) DENSENET169_WEIGHT_PATH = ( f"{BASE_WEIGHTS_PATH}densenet169_weights_tf_dim_ordering_tf_kernels.h5" ) -DENSENET169_WEIGHT_PATH_NO_TOP = f"{BASE_WEIGHTS_PATH}densenet169_weights_tf_dim_ordering_tf_kernels_notop.h5" +DENSENET169_WEIGHT_PATH_NO_TOP = ( + f"{BASE_WEIGHTS_PATH}" + "densenet169_weights_tf_dim_ordering_tf_kernels_notop.h5" +) DENSENET201_WEIGHT_PATH = ( f"{BASE_WEIGHTS_PATH}densenet201_weights_tf_dim_ordering_tf_kernels.h5" ) -DENSENET201_WEIGHT_PATH_NO_TOP = f"{BASE_WEIGHTS_PATH}densenet201_weights_tf_dim_ordering_tf_kernels_notop.h5" +DENSENET201_WEIGHT_PATH_NO_TOP = ( + f"{BASE_WEIGHTS_PATH}" + "densenet201_weights_tf_dim_ordering_tf_kernels_notop.h5" +) def dense_block(x, blocks, name): diff --git a/keras/src/applications/mobilenet_v2.py b/keras/src/applications/mobilenet_v2.py index b0d61068fca9..50e475329e63 100644 --- a/keras/src/applications/mobilenet_v2.py +++ b/keras/src/applications/mobilenet_v2.py @@ -368,13 +368,19 @@ def MobileNetV2( # Load weights. 
if weights == "imagenet": if include_top: - model_name = f"mobilenet_v2_weights_tf_dim_ordering_tf_kernels_{float(alpha)}_{rows}.h5" + model_name = ( + "mobilenet_v2_weights_tf_dim_ordering_tf_kernels" + f"_{float(alpha)}_{rows}.h5" + ) weight_path = BASE_WEIGHT_PATH + model_name weights_path = file_utils.get_file( model_name, weight_path, cache_subdir="models" ) else: - model_name = f"mobilenet_v2_weights_tf_dim_ordering_tf_kernels_{float(alpha)}_{rows}_no_top.h5" + model_name = ( + "mobilenet_v2_weights_tf_dim_ordering_tf_kernels_" + f"{float(alpha)}_{rows}_no_top.h5" + ) weight_path = BASE_WEIGHT_PATH + model_name weights_path = file_utils.get_file( model_name, weight_path, cache_subdir="models" diff --git a/keras/src/applications/nasnet.py b/keras/src/applications/nasnet.py index 0f935611bc6d..e0f55da4f467 100644 --- a/keras/src/applications/nasnet.py +++ b/keras/src/applications/nasnet.py @@ -137,8 +137,9 @@ def NASNet( and weights == "imagenet" ): raise ValueError( - "When specifying the input shape of a NASNet and loading `ImageNet` weights, " - f"the input_shape argument must be static (no None entries). Got: `input_shape={input_shape}`." + "When specifying the input shape of a NASNet and loading " + "`ImageNet` weights, the input_shape argument must be static" + f" (no None entries). Got: `input_shape={input_shape}`." ) if default_size is None: diff --git a/keras/src/legacy/preprocessing/image.py b/keras/src/legacy/preprocessing/image.py index 5409ea87b425..fd5f3a00bdb7 100644 --- a/keras/src/legacy/preprocessing/image.py +++ b/keras/src/legacy/preprocessing/image.py @@ -617,9 +617,10 @@ def __init__( channels_axis = 3 if data_format == "channels_last" else 1 if self.x.shape[channels_axis] not in {1, 3, 4}: warnings.warn( - f'NumpyArrayIterator is set to use the data format convention "{data_format}' - f'" (channels on axis {channels_axis})' - f", i.e. expected either 1, 3, or 4 channels on axis {channels_axis}. " + f"NumpyArrayIterator is set to use the data format convention" + f' "{data_format}" (channels on axis {channels_axis})' + ", i.e. expected either 1, 3, or 4 channels " + f"on axis {channels_axis}. " f"However, it was passed an array with shape {self.x.shape}" f" ({self.x.shape[channels_axis]} channels)." ) @@ -1491,8 +1492,8 @@ def fit(self, x, augment=False, rounds=1, seed=None): f'following the data format convention "{self.data_format}' f'" (channels on axis {self.channel_axis})' ", i.e. expected either 1, 3 or 4 channels on axis " - f"{self.channel_axis}. However, it was passed an array with shape " - f"{x.shape} ({x.shape[self.channel_axis]} channels)." + f"{self.channel_axis}. However, it was passed an array with" + f" shape {x.shape} ({x.shape[self.channel_axis]} channels)." ) if seed is not None: diff --git a/keras/src/saving/file_editor.py b/keras/src/saving/file_editor.py index dcd416ce8bd5..b486590f2132 100644 --- a/keras/src/saving/file_editor.py +++ b/keras/src/saving/file_editor.py @@ -557,13 +557,16 @@ def _generate_html_weights(dictionary, margin_left=0, font_size=1): html = "" for key, value in dictionary.items(): if isinstance(value, dict) and value: + weights_html = _generate_html_weights( + value, margin_left + 20, font_size - 1 + ) html += ( f'
<details style="margin-left: {margin_left}px;">' '<summary style="' f"font-size: {font_size}em; " "font-weight: bold;" f'">{key}</summary>' - f"{_generate_html_weights(value, margin_left + 20, font_size - 1)}" + f"{weights_html}" "</details>
" ) else: diff --git a/keras/src/utils/file_utils.py b/keras/src/utils/file_utils.py index 71c04d0d39c0..55d0a8d7b76e 100644 --- a/keras/src/utils/file_utils.py +++ b/keras/src/utils/file_utils.py @@ -9,7 +9,6 @@ import urllib.parse import warnings import zipfile -from tempfile import tempdir from urllib.request import urlretrieve from keras.src.api_export import keras_export diff --git a/keras/src/utils/file_utils_test.py b/keras/src/utils/file_utils_test.py index 03a169738037..da99ed627576 100644 --- a/keras/src/utils/file_utils_test.py +++ b/keras/src/utils/file_utils_test.py @@ -9,7 +9,6 @@ import zipfile from unittest.mock import patch - from keras.src.testing import test_case from keras.src.utils import file_utils diff --git a/keras/src/utils/summary_utils.py b/keras/src/utils/summary_utils.py index 96f0a48d2c11..dcdd1b7ce865 100644 --- a/keras/src/utils/summary_utils.py +++ b/keras/src/utils/summary_utils.py @@ -390,13 +390,15 @@ def print_layer(layer, nested_level=0): def print(*args, **kwargs): - from keras.api import backend import jax import tensorflow as tf + from keras.api import backend + backend = backend.backend() print_fn = {"jax": jax.debug.print, "tensorflow": tf.print}[backend] - # "pytorch" https://pytorch.org/docs/stable/generated/torch.set_printoptions.html ? + # "pytorch" + # pytorch.org/docs/stable/generated/torch.set_printoptions.html ? # "openvino" return print_fn(*args, **kwargs) From 70fa3ea0554f6444fa71e38fb45421d627f1c814 Mon Sep 17 00:00:00 2001 From: Samuel Marks <807580+SamuelMarks@users.noreply.github.com> Date: Mon, 2 Jun 2025 14:39:29 -0600 Subject: [PATCH 5/5] [keras/src/utils/summary_utils.py] Remove unrelated to f-string & `os.path.join` code --- keras/src/utils/summary_utils.py | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/keras/src/utils/summary_utils.py b/keras/src/utils/summary_utils.py index dcdd1b7ce865..a8cb253fd1e0 100644 --- a/keras/src/utils/summary_utils.py +++ b/keras/src/utils/summary_utils.py @@ -389,20 +389,6 @@ def print_layer(layer, nested_level=0): print_fn(console.end_capture()) -def print(*args, **kwargs): - import jax - import tensorflow as tf - - from keras.api import backend - - backend = backend.backend() - print_fn = {"jax": jax.debug.print, "tensorflow": tf.print}[backend] - # "pytorch" - # pytorch.org/docs/stable/generated/torch.set_printoptions.html ? - # "openvino" - return print_fn(*args, **kwargs) - - def get_layer_index_bound_by_layer_name(layers, layer_range=None): """Get the layer indexes from the model based on layer names.