import numpy as np
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split

# Generate a synthetic binary classification dataset and split it.
X, y = make_classification(n_samples=5000, n_features=10, n_informative=5, random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
ML Module
relax.ml_model.MLP
class relax.ml_model.MLP (sizes, output_size=2, dropout_rate=0.3, use_batch_norm=False, last_activation='softmax', **kwargs)
MLP model composed of multiple MLP blocks followed by a final dense layer.
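A minimal construction sketch based on the signature above (the layer sizes are illustrative, not from the original text):

from relax.ml_model import MLP

# Two hidden MLP blocks (64 and 32 units) feeding a 2-way softmax head.
mlp = MLP(sizes=[64, 32], output_size=2, dropout_rate=0.3)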
relax.ml_model.MLPBlock
class relax.ml_model.MLPBlock (output_size, dropout_rate=0.3, use_batch_norm=False)
MLP block with leaky ReLU activation, dropout, and optional batch normalization.
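Correspondingly, a single block can be built on its own (the width is illustrative; the internal layer order is assumed from the class description):

from relax.ml_model import MLPBlock

# A 64-unit block: dense -> leaky ReLU -> dropout (batch norm off by default).
block = MLPBlock(64, dropout_rate=0.3, use_batch_norm=False)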
relax.ml_model.MLModuleConfig
class relax.ml_model.MLModuleConfig (sizes=[64, 32, 16], output_size=2, dropout_rate=0.3, lr=0.001, opt_name='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
Configurator of MLModule.
Parameters:
- sizes (List[int], default=[64, 32, 16]) – List of hidden layer sizes.
- output_size (int, default=2) – The number of output classes.
- dropout_rate (float, default=0.3) – Dropout rate.
- lr (float, default=0.001) – Learning rate.
- opt_name (str, default='adam') – Optimizer name.
- loss (str, default='sparse_categorical_crossentropy') – Loss function name.
- metrics (List[str], default=['accuracy']) – List of metric names.
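For instance, overriding a few of the defaults (the values here are illustrative; 'adamw' is used as an optimizer name later in this page):

from relax.ml_model import MLModuleConfig

config = MLModuleConfig(
    sizes=[128, 64, 32],  # three hidden layers
    dropout_rate=0.2,
    lr=3e-4,
    opt_name='adamw',
)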
relax.ml_model.MLModule
class relax.ml_model.MLModule (config=None, model=None, name=None)
Base class for all modules.
Methods
- is_trained ()
- train (data, batch_size=128, epochs=10, **fit_kwargs) – Train the module.
model = MLModule(
    MLModuleConfig(sizes=[64, 32, 16])
)
model.train((X_train, y_train), epochs=5)
assert model.is_trained
Epoch 1/5
30/30 ━━━━━━━━━━━━━━━━━━━━ 2s 27ms/step - accuracy: 0.5601 - loss: 1.7022
Epoch 2/5
30/30 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - accuracy: 0.7016 - loss: 0.7342
Epoch 3/5
30/30 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - accuracy: 0.7190 - loss: 0.6272
Epoch 4/5
30/30 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - accuracy: 0.7522 - loss: 0.5503
Epoch 5/5
30/30 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - accuracy: 0.7732 - loss: 0.4973
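As a quick sanity check, we can score the trained module on the held-out split (a sketch; pred_fn returns class probabilities, as the cells below assume):

from sklearn.metrics import accuracy_score

test_acc = accuracy_score(y_test, model.pred_fn(X_test).argmax(axis=1))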
model.save('tmp/model')
model_1 = MLModule.load_from_path('tmp/model')
assert model_1.is_trained
assert np.allclose(model_1.pred_fn(X_test), model.pred_fn(X_test))
# models = []
# for data in DEFAULT_DATA_CONFIGS.keys():
#     rf_acc, model_acc = train_ml_model_and_rf(data)
#     if rf_acc > model_acc:
#         models.append((data, rf_acc, model_acc))

# data = "dummy"
# dm = load_data(data)
# file_path = f"assets/{data}/model/model.keras"
# conf_path = f"assets/{data}/model/config.json"
# ckpt_cb = keras.callbacks.ModelCheckpoint(
#     filepath=file_path,
#     monitor='val_accuracy',
#     mode='max',
#     save_best_only=True
# )
# train_xs, train_ys = dm['train']
# test_xs, test_ys = dm['test']
# model = MLModule({
#     'sizes': [128, 64, 32, 16],
#     'dropout_rate': 0.3, 'lr': 0.001,
#     'opt_name': 'adamw'
# }).train(
#     dm, validation_data=dm['test'], callbacks=[ckpt_cb], batch_size=64, epochs=10
# )
# model.config.save(conf_path)
# # Load the best model
# model = MLModule.load_from_path(f"assets/{data}/model")
# rf = RandomForestClassifier().fit(train_xs, train_ys.reshape(-1))
# rf_acc = accuracy_score(test_ys, rf.predict(test_xs))
# model_acc = accuracy_score(test_ys, model.pred_fn(test_xs).argmax(axis=1))
# rf_acc, model_acc
Load ML Module
TODO: Need test cases
relax.ml_model.load_ml_module
relax.ml_model.load_ml_module (name)
Load the ML module.
relax.ml_model.download_ml_module
relax.ml_model.download_ml_module (name, path=None)
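No usage is shown for download_ml_module above; a minimal sketch, assuming pretrained modules are keyed by dataset name ('adult' is an illustrative name, not confirmed by this page):

from relax.ml_model import download_ml_module, load_ml_module

# Fetch a pretrained module by dataset name, then load it for inference.
download_ml_module('adult')   # path=None falls back to the default location
ml_model = load_ml_module('adult')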
for name in DEFAULT_DATA_CONFIGS.keys():
    dm = load_data(name)
    ml_model = load_ml_module(name)
    X_train, y_train = dm['train']
    X_test, y_test = dm['test']
    model_acc = accuracy_score(y_test, ml_model.pred_fn(X_test).argmax(axis=1))
AutoEncoder
relax.ml_model.AutoEncoder
class relax.ml_model.AutoEncoder (enc_sizes, dec_sizes, output_size, dropout_rate=0.2, last_activation='sigmoid', name='autoencoder', **kwargs)
A model grouping layers into an object with training/inference features.
There are three ways to instantiate a Model:
With the “Functional API”
You start from Input, you chain layer calls to specify the model's forward pass, and finally you create your model from inputs and outputs:
import keras

inputs = keras.Input(shape=(37,))
x = keras.layers.Dense(32, activation="relu")(inputs)
outputs = keras.layers.Dense(5, activation="softmax")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
Note: Only dicts, lists, and tuples of input tensors are supported. Nested inputs are not supported (e.g. lists of lists or dicts of dicts).
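For instance, a model whose inputs are a dict of tensors (a small sketch, not part of the original text):

a = keras.Input(shape=(4,), name="a")
b = keras.Input(shape=(4,), name="b")
outputs = keras.layers.Dense(1)(keras.layers.Concatenate()([a, b]))
model = keras.Model(inputs={"a": a, "b": b}, outputs=outputs)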
A new Functional API model can also be created by using the intermediate tensors. This enables you to quickly extract sub-components of the model.
Example:
inputs = keras.Input(shape=(None, None, 3))
processed = keras.layers.RandomCrop(width=128, height=128)(inputs)
conv = keras.layers.Conv2D(filters=32, kernel_size=3)(processed)
pooling = keras.layers.GlobalAveragePooling2D()(conv)
feature = keras.layers.Dense(10)(pooling)

full_model = keras.Model(inputs, feature)
backbone = keras.Model(processed, conv)
activations = keras.Model(conv, feature)
Note that the backbone and activations models are not created with keras.Input objects, but with the tensors that originate from keras.Input objects. Under the hood, the layers and weights will be shared across these models, so that the user can train the full_model, and use backbone or activations to do feature extraction. The inputs and outputs of the model can be nested structures of tensors as well, and the created models are standard Functional API models that support all the existing APIs.
By subclassing the Model class
In that case, you should define your layers in __init__() and you should implement the model's forward pass in call().
class MyModel(keras.Model):
    def __init__(self):
        super().__init__()
        self.dense1 = keras.layers.Dense(32, activation="relu")
        self.dense2 = keras.layers.Dense(5, activation="softmax")

    def call(self, inputs):
        x = self.dense1(inputs)
        return self.dense2(x)

model = MyModel()
If you subclass Model, you can optionally have a training argument (boolean) in call(), which you can use to specify a different behavior in training and inference:
class MyModel(keras.Model):
    def __init__(self):
        super().__init__()
        self.dense1 = keras.layers.Dense(32, activation="relu")
        self.dense2 = keras.layers.Dense(5, activation="softmax")
        self.dropout = keras.layers.Dropout(0.5)

    def call(self, inputs, training=False):
        x = self.dense1(inputs)
        x = self.dropout(x, training=training)
        return self.dense2(x)

model = MyModel()
Once the model is created, you can configure the model with losses and metrics with model.compile(), train the model with model.fit(), or use the model to do prediction with model.predict().
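A compact sketch of that workflow, continuing with the MyModel subclass above (the data shapes are illustrative):

model = MyModel()
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"])

x = np.random.rand(256, 16).astype("float32")
y = np.random.randint(0, 5, size=(256,))
model.fit(x, y, epochs=2, batch_size=32)
probs = model.predict(x)  # shape (256, 5): softmax probabilities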
With the Sequential class
In addition, keras.Sequential is a special case of model where the model is purely a stack of single-input, single-output layers.
model = keras.Sequential([
    keras.Input(shape=(None, None, 3)),
    keras.layers.Conv2D(filters=32, kernel_size=3),
])
ae = AutoEncoder([10, 5], [5, 10], output_size=10, last_activation=None)
ae.compile(optimizer='adam', loss='mse')
ae.fit(X_train, X_train, epochs=5, batch_size=128)
Epoch 1/5
6/6 ━━━━━━━━━━━━━━━━━━━━ 2s 162ms/step - loss: 0.6734
Epoch 2/5
6/6 ━━━━━━━━━━━━━━━━━━━━ 0s 973us/step - loss: 0.5926
Epoch 3/5
6/6 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - loss: 0.5185
Epoch 4/5
6/6 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - loss: 0.4764
Epoch 5/5
6/6 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - loss: 0.4179
<keras.src.callbacks.history.History>
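Once fit, the autoencoder can reconstruct held-out samples; a short sketch of measuring per-sample reconstruction error (AutoEncoder is a keras Model, so predict is available):

X_recon = ae.predict(X_test)
recon_err = np.mean((X_test - X_recon) ** 2, axis=1)  # per-sample MSE
print(recon_err.mean())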