inputs = jrand.normal(jrand.PRNGKey(0), (100, 10))
encoded_x = Encoder([100, 10])(inputs, training=True)
assert encoded_x[0].shape == (100, 5)
assert encoded_x[1].shape == (100, 5)
decoded_x = Decoder([100, 10], 10)(inputs, training=True)
assert decoded_x.shape == (100, 10)
CLUE
relax.methods.clue.Decoder
class relax.methods.clue.Decoder (sizes, output_size, dropout=0.1)
This is the class from which all layers inherit.
A layer is a callable object that takes as input one or more tensors and that outputs one or more tensors. It involves computation, defined in the call() method, and a state (weight variables). State can be created:

- in __init__(), for instance via self.add_weight();
- in the optional build() method, which is invoked by the first __call__() to the layer, and supplies the shape(s) of the input(s), which may not have been known at initialization time.

Layers are recursively composable: if you assign a Layer instance as an attribute of another Layer, the outer layer will start tracking the weights created by the inner layer. Nested layers should be instantiated in the __init__() method or in the build() method.
Users will just instantiate a layer and then treat it as a callable.
Args:
- trainable: Boolean, whether the layer's variables should be trainable.
- name: String name of the layer.
- dtype: The dtype of the layer's computations and weights. Can also be a keras.DTypePolicy, which allows the computation and weight dtype to differ. Defaults to None. None means to use keras.config.dtype_policy(), which is a float32 policy unless set to a different value (via keras.config.set_dtype_policy()).
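For example, the global dtype policy can be changed before any layers are constructed (a minimal sketch using the standard Keras config API mentioned above):

import keras

# Compute in float16 while keeping variables in float32 (mixed precision).
keras.config.set_dtype_policy("mixed_float16")
print(keras.config.dtype_policy())

# Restore the default float32 policy.
keras.config.set_dtype_policy("float32")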
Attributes:
- name: The name of the layer (string).
- dtype: Dtype of the layer's weights. Alias of layer.variable_dtype.
- variable_dtype: Dtype of the layer's weights.
- compute_dtype: The dtype of the layer's computations. Layers automatically cast inputs to this dtype, which causes the computations and output to also be in this dtype. When mixed precision is used with a keras.DTypePolicy, this will be different than variable_dtype.
- trainable_weights: List of variables to be included in backprop.
- non_trainable_weights: List of variables that should not be included in backprop.
- weights: The concatenation of the lists trainable_weights and non_trainable_weights (in this order).
- trainable: Whether the layer should be trained (boolean), i.e. whether its potentially-trainable weights should be returned as part of layer.trainable_weights.
- input_spec: Optional (list of) InputSpec object(s) specifying the constraints on inputs that can be accepted by the layer.
We recommend that descendants of Layer implement the following methods:

- __init__(): Defines custom layer attributes, and creates layer weights that do not depend on input shapes, using add_weight(), or other state.
- build(self, input_shape): This method can be used to create weights that depend on the shape(s) of the input(s), using add_weight(), or other state. __call__() will automatically build the layer (if it has not been built yet) by calling build().
- call(self, *args, **kwargs): Called in __call__ after making sure build() has been called. call() performs the logic of applying the layer to the input arguments. Two reserved keyword arguments you can optionally use in call() are: 1. training (boolean, whether the call is in inference mode or training mode); 2. mask (boolean tensor encoding masked timesteps in the input, used e.g. in RNN layers). A typical signature for this method is call(self, inputs), and users can optionally add training and mask if the layer needs them.
- get_config(self): Returns a dictionary containing the configuration used to initialize this layer. If the keys differ from the arguments in __init__(), then override from_config(self) as well. This method is used when saving the layer or a model that contains this layer.
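For instance, a small custom layer with one constructor argument could implement get_config() as follows (a minimal sketch; ScaledActivation is a hypothetical layer used only for illustration):

from keras.layers import Layer

class ScaledActivation(Layer):
    # Hypothetical layer: multiplies its inputs by a fixed factor.
    def __init__(self, factor=2.0, **kwargs):
        super().__init__(**kwargs)
        self.factor = factor

    def call(self, inputs):
        return inputs * self.factor

    def get_config(self):
        # Merge the parent config with the arguments needed to re-create
        # this layer via from_config().
        config = super().get_config()
        config.update({"factor": self.factor})
        return config

layer = ScaledActivation(factor=0.5)
restored = ScaledActivation.from_config(layer.get_config())
assert restored.factor == 0.5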
Examples:

Here's a basic example: a layer with two variables, w and b, that returns y = w . x + b. It shows how to implement build() and call(). Variables set as attributes of a layer are tracked as weights of the layers (in layer.weights).
class SimpleDense(Layer):

    def __init__(self, units=32):
        super().__init__()
        self.units = units

    # Create the state of the layer (weights)
    def build(self, input_shape):
        self.kernel = self.add_weight(
            shape=(input_shape[-1], self.units),
            initializer="glorot_uniform",
            trainable=True,
            name="kernel",
        )
        self.bias = self.add_weight(
            shape=(self.units,),
            initializer="zeros",
            trainable=True,
            name="bias",
        )

    # Defines the computation
    def call(self, inputs):
        return ops.matmul(inputs, self.kernel) + self.bias

# Instantiates the layer.
linear_layer = SimpleDense(4)

# This will also call `build(input_shape)` and create the weights.
y = linear_layer(ops.ones((2, 2)))
assert len(linear_layer.weights) == 2

# These weights are trainable, so they're listed in `trainable_weights`:
assert len(linear_layer.trainable_weights) == 2
Besides trainable weights, updated via backpropagation during training, layers can also have non-trainable weights. These weights are meant to be updated manually during call(). Here's an example layer that computes the running sum of its inputs:
class ComputeSum(Layer):

    def __init__(self, input_dim):
        super(ComputeSum, self).__init__()
        # Create a non-trainable weight.
        self.total = self.add_weight(
            shape=(),
            initializer="zeros",
            trainable=False,
            name="total",
        )

    def call(self, inputs):
        self.total.assign(self.total + ops.sum(inputs))
        return self.total

my_sum = ComputeSum(2)
x = ops.ones((2, 2))
y = my_sum(x)

assert my_sum.weights == [my_sum.total]
assert my_sum.non_trainable_weights == [my_sum.total]
assert my_sum.trainable_weights == []
relax.methods.clue.Encoder
class relax.methods.clue.Encoder (sizes, dropout=0.1)
relax.methods.clue.kl_divergence
relax.methods.clue.kl_divergence (p, q, eps=7.62939453125e-06)
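A minimal usage sketch (assuming p and q are probability arrays of the same shape; the inputs below are purely illustrative):

import jax
import jax.random as jrand
from relax.methods.clue import kl_divergence

# Two softmax-normalized probability vectors over 10 categories.
p = jax.nn.softmax(jrand.normal(jrand.PRNGKey(0), (10,)))
q = jax.nn.softmax(jrand.normal(jrand.PRNGKey(1), (10,)))

# eps (default 2**-17 ≈ 7.6e-06) guards against taking the log of zero probabilities.
kl = kl_divergence(p, q)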
relax.methods.clue.VAEGaussCat
class relax.methods.clue.VAEGaussCat (enc_sizes=[20, 16, 14, 12], dec_sizes=[12, 14, 16, 20], dropout_rate=0.1)
A model grouping layers into an object with training/inference features.
There are three ways to instantiate a Model:

With the “Functional API”

You start from Input, you chain layer calls to specify the model's forward pass, and finally you create your model from inputs and outputs:
inputs = keras.Input(shape=(37,))
x = keras.layers.Dense(32, activation="relu")(inputs)
outputs = keras.layers.Dense(5, activation="softmax")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
Note: Only dicts, lists, and tuples of input tensors are supported. Nested inputs are not supported (e.g. lists of lists or dicts of dicts).
A new Functional API model can also be created by using the intermediate tensors. This enables you to quickly extract sub-components of the model.
Example:
inputs = keras.Input(shape=(None, None, 3))
processed = keras.layers.RandomCrop(width=128, height=128)(inputs)
conv = keras.layers.Conv2D(filters=32, kernel_size=3)(processed)
pooling = keras.layers.GlobalAveragePooling2D()(conv)
feature = keras.layers.Dense(10)(pooling)

full_model = keras.Model(inputs, feature)
backbone = keras.Model(processed, conv)
activations = keras.Model(conv, feature)
Note that the backbone and activations models are not created with keras.Input objects, but with the tensors that originate from keras.Input objects. Under the hood, the layers and weights will be shared across these models, so that the user can train the full_model, and use backbone or activations to do feature extraction. The inputs and outputs of the model can be nested structures of tensors as well, and the created models are standard Functional API models that support all the existing APIs.
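For example, the backbone sub-model can be applied directly to an already-cropped image (a small sketch with made-up data, not part of the original example):

import numpy as np

# The backbone's input is the intermediate `processed` tensor, i.e. 128x128 crops.
crop = np.random.rand(1, 128, 128, 3).astype("float32")
conv_maps = backbone.predict(crop, verbose=0)  # feature maps from the Conv2D layer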
By subclassing the Model class

In that case, you should define your layers in __init__() and you should implement the model's forward pass in call().
class MyModel(keras.Model):

    def __init__(self):
        super().__init__()
        self.dense1 = keras.layers.Dense(32, activation="relu")
        self.dense2 = keras.layers.Dense(5, activation="softmax")

    def call(self, inputs):
        x = self.dense1(inputs)
        return self.dense2(x)

model = MyModel()
If you subclass Model, you can optionally have a training argument (boolean) in call(), which you can use to specify a different behavior in training and inference:
class MyModel(keras.Model):

    def __init__(self):
        super().__init__()
        self.dense1 = keras.layers.Dense(32, activation="relu")
        self.dense2 = keras.layers.Dense(5, activation="softmax")
        self.dropout = keras.layers.Dropout(0.5)

    def call(self, inputs, training=False):
        x = self.dense1(inputs)
        x = self.dropout(x, training=training)
        return self.dense2(x)

model = MyModel()
Once the model is created, you can configure the model with losses and metrics with model.compile(), train the model with model.fit(), or use the model to do prediction with model.predict().
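For instance, continuing the subclassed model above (a minimal sketch with randomly generated data, not taken from the original documentation):

import numpy as np

model = MyModel()
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy")

# Purely illustrative data: 16 samples, 8 features, 5 target classes.
x = np.random.rand(16, 8).astype("float32")
y = np.random.randint(0, 5, size=(16,))

model.fit(x, y, epochs=1, verbose=0)
preds = model.predict(x, verbose=0)  # shape (16, 5): softmax class probabilities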
With the Sequential class

In addition, keras.Sequential is a special case of model where the model is purely a stack of single-input, single-output layers.
model = keras.Sequential([
    keras.Input(shape=(None, None, 3)),
    keras.layers.Conv2D(filters=32, kernel_size=3),
])
vae_model = VAEGaussCat()
vae_model.compile(optimizer=keras.optimizers.Adam(0.001), loss=None)
dm = load_data('dummy')
xs, _ = dm['train']
history = vae_model.fit(
    xs, xs,
    batch_size=64,
    epochs=2,
    verbose=0  # Set to 1 for training progress
)
assert history.history['loss'][0] > history.history['loss'][-1]
x = xs[:1]
pred_fn = load_ml_module('dummy').pred_fn
cf = _clue_generate(
    x,
    jrand.PRNGKey(get_config().global_seed),
    y_target=1 - pred_fn(x),
    pred_fn=pred_fn,
    max_steps=100,
    step_size=0.1,
    vae_module=vae_model,
    uncertainty_weight=1.,
    aleatoric_weight=1.,
    prior_weight=1.,
    distance_weight=1.,
    validity_weight=1.,
    validity_fn=keras.losses.get({'class_name': 'KLDivergence', 'config': {'reduction': None}}),
    apply_fn=lambda x, cf, hard: cf
)
assert cf.shape == x.shape
relax.methods.clue.CLUEConfig
class relax.methods.clue.CLUEConfig (enc_sizes=[20, 16, 14, 12], dec_sizes=[12, 14, 16, 20], dropout_rate=0.1, encoded_size=5, lr=0.001, max_steps=500, step_size=0.01, vae_n_epochs=10, vae_batch_size=128, seed=0)
Base class for all config classes.
Parameters:

- enc_sizes (List[int], default=[20, 16, 14, 12]) – Sequence of Encoder layer sizes.
- dec_sizes (List[int], default=[12, 14, 16, 20]) – Sequence of Decoder layer sizes.
- dropout_rate (float, default=0.1) – Dropout rate.
- encoded_size (int, default=5) – Encoded size.
- lr (float, default=0.001) – Learning rate.
- max_steps (int, default=500) – Max steps.
- step_size (float, default=0.01) – Step size.
- vae_n_epochs (int, default=10) – Number of epochs for VAE.
- vae_batch_size (int, default=128) – Batch size for VAE.
- seed (int, default=0) – Seed for random number generator.
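A usage sketch (assuming a CLUEConfig instance can be passed to CLUE through its config argument, as the CLUE signature further below suggests; the chosen values are illustrative only):

from relax.methods.clue import CLUE, CLUEConfig

# Hypothetical configuration: a smaller VAE and fewer refinement steps.
config = CLUEConfig(
    enc_sizes=[16, 8],
    dec_sizes=[8, 16],
    encoded_size=4,
    max_steps=100,
    vae_n_epochs=5,
)
clue = CLUE(config)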
relax.methods.clue.get_reconstruction_loss_fn
relax.methods.clue.get_reconstruction_loss_fn (dm)
dm = load_data('adult')
reconstruction_loss = get_reconstruction_loss_fn(dm)
xs, _ = dm['test']
cfs = jrand.normal(jrand.PRNGKey(0), xs.shape)
loss = reconstruction_loss(xs, cfs)
assert loss.shape == (xs.shape[0], len(dm.features))
relax.methods.clue.CLUE
class relax.methods.clue.CLUE (config=None, vae=None, name='CLUE')
Base class for parametric counterfactual modules.
Methods
set_apply_constraints_fn (apply_constraints_fn)
set_compute_reg_loss_fn (compute_reg_loss_fn)
apply_constraints (*args, **kwargs)
compute_reg_loss (*args, **kwargs)
save (path)
load_from_path (path)
before_generate_cf (*args, **kwargs)
generate_cf (*args, **kwargs)
data = load_data('adult')
pred_fn = load_ml_module('adult').pred_fn
xs_train, ys_train = data['train']
xs_test, ys_test = data['test']
clue = CLUE()
clue.train(data, batch_size=128, epochs=5)
clue.set_apply_constraints_fn(data.apply_constraints)
Epoch 1/5
191/191 ━━━━━━━━━━━━━━━━━━━━ 4s 11ms/step - loss: 0.1202
Epoch 2/5
191/191 ━━━━━━━━━━━━━━━━━━━━ 0s 769us/step - loss: 0.0694
Epoch 3/5
191/191 ━━━━━━━━━━━━━━━━━━━━ 0s 748us/step - loss: 0.0639
Epoch 4/5
191/191 ━━━━━━━━━━━━━━━━━━━━ 0s 743us/step - loss: 0.0621
Epoch 5/5
191/191 ━━━━━━━━━━━━━━━━━━━━ 0s 736us/step - loss: 0.0613
cf = clue.generate_cf(xs_train[0], pred_fn, rng_key=jrand.PRNGKey(0))
n_tests = 100
partial_gen = partial(clue.generate_cf, pred_fn=pred_fn)
cfs = jax.vmap(partial_gen)(xs_test[:n_tests], rng_key=jrand.split(jrand.PRNGKey(0), n_tests))

assert cfs.shape == xs_test[:100].shape

print("Validity: ", keras.metrics.binary_accuracy(
    (1 - pred_fn(xs_test[:100])).round(),
    pred_fn(cfs[:, :])
).mean())
Validity: 0.16