Day 1 - Part 4: TensorFlow Essentials
Master 2 (203) in Financial Markets, Paris Dauphine - PSL University
2025-10-31
This part covers TensorFlow basics, the Keras API, and tf.function for optimized graphs.

Installation

Windows (CPU):
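The pip command for this platform is missing from the extract; a minimal sketch, assuming the standard CPU-only wheel from PyPI:

python -m pip install --upgrade pip
pip install tensorflow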
macOS Apple Silicon (M1/M2):
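Also missing from the extract; a minimal sketch, assuming TensorFlow 2.13 or later (which ships native arm64 wheels):

pip install tensorflow
pip install tensorflow-metal   # optional: GPU acceleration via Apple's Metal plugin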
macOS Intel without AVX (official TF wheels require AVX):
# Options:
# 1) Use Google Colab (recommended for TF on older Intel Macs)
# 2) Use Keras 3 with PyTorch backend locally in the same conda env:
conda create -n mlpython python=3.11 -y
conda activate mlpython
python -m pip install --upgrade pip
pip install "keras>=3" torch
# Set backend env var
# macOS/Linux:
export KERAS_BACKEND=torch
# Windows (Powershell):
$env:KERAS_BACKEND = "torch"

Verify TF:
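The snippet under this heading is not in the extract; a minimal check, assuming a standard install (the second line only applies if you took the Keras 3 + PyTorch route):

python -c "import tensorflow as tf; print(tf.__version__)"
python -c "import keras; print(keras.backend.backend())"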
pip uninstall -y jax jaxlib

Notes:
- Use the official selector at https://www.tensorflow.org/install for GPU/CUDA combos.
- TensorFlow 2.x includes Keras: from tensorflow import keras.
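The notes mention GPU/CUDA combinations; a quick, illustrative way (not from the original slides) to confirm TensorFlow can see a GPU once installed:

python -c "import tensorflow as tf; print(tf.config.list_physical_devices('GPU'))"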
Tensors and Variables

import tensorflow as tf
# Tensors (immutable, like NumPy arrays on steroids)
a = tf.constant([[1., 2.], [3., 4.]], dtype=tf.float32)
b = tf.random.normal((2, 2))
# Eager by default (execute immediately)
print('sum =', tf.reduce_sum(a + b).numpy())
# Variables (stateful parameters)
w = tf.Variable(tf.random.normal((3, 1)))
print('w shape:', w.shape)

Low-level training with tf.GradientTape

import tensorflow as tf
import numpy as np
# Data: y = sin(x) + noise
X = np.linspace(-5, 5, 1000).astype('float32').reshape(-1, 1)
y = (np.sin(X) + 0.1*np.random.randn(*X.shape)).astype('float32')
# Model parameters
w1 = tf.Variable(tf.random.normal((1, 64)))
b1 = tf.Variable(tf.zeros((64,)))
w2 = tf.Variable(tf.random.normal((64, 64)))
b2 = tf.Variable(tf.zeros((64,)))
w3 = tf.Variable(tf.random.normal((64, 1)))
b3 = tf.Variable(tf.zeros((1,)))
optimizer = tf.keras.optimizers.Adam(1e-3)
@tf.function
def forward(x):
    x = tf.nn.relu(tf.matmul(x, w1) + b1)
    x = tf.nn.relu(tf.matmul(x, w2) + b2)
    out = tf.matmul(x, w3) + b3
    return out
@tf.function
def train_step(x, y_true):
    with tf.GradientTape() as tape:
        y_pred = forward(x)
        loss = tf.reduce_mean((y_pred - y_true)**2)
    grads = tape.gradient(loss, [w1, b1, w2, b2, w3, b3])
    optimizer.apply_gradients(zip(grads, [w1, b1, w2, b2, w3, b3]))
    return loss
# Train
for epoch in range(100):
    loss = train_step(X, y)
    if (epoch+1) % 20 == 0:
        tf.print('Epoch', epoch+1, 'Loss', loss)

Building a classifier with Keras

import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from sklearn.datasets import make_moons
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
# Data
X, y = make_moons(n_samples=1000, noise=0.1, random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# Model
model = keras.Sequential([
    layers.Dense(32, activation='relu', input_shape=(2,)),
    layers.Dropout(0.3),
    layers.Dense(16, activation='relu'),
    layers.Dense(1, activation='sigmoid')
])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# Train
history = model.fit(X_train, y_train, validation_split=0.2, epochs=100, batch_size=32, verbose=0)
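# (Illustrative addition, not from the original slides:) plot the learning curves
# captured in `history` to inspect over/under-fitting; assumes matplotlib is installed.
import matplotlib.pyplot as plt
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='validation')
plt.xlabel('epoch'); plt.ylabel('binary cross-entropy'); plt.legend()
plt.show()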
# Evaluate
loss, acc = model.evaluate(X_test, y_test, verbose=0)
print(f'Test Accuracy: {acc:.3f}')

Monitoring with TensorBoard

import datetime
log_dir = 'logs/fit/' + datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
tb_cb = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
# Re-train with TensorBoard callback
model.fit(X_train, y_train, validation_split=0.2, epochs=20, batch_size=32, callbacks=[tb_cb])
# Then run in a terminal:
# tensorboard --logdir logs/fit

Saving, loading, and deployment

# Save whole model
model.save('tf_model.keras')
# Load and predict
loaded = tf.keras.models.load_model('tf_model.keras')
probs = loaded.predict(X_test[:5])
print(probs[:5])
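# (Illustrative addition, not from the original slides:) turn the sigmoid outputs
# into class labels with a 0.5 threshold and compare against the true labels.
preds = (probs > 0.5).astype(int).ravel()
print('predicted:', preds, 'actual:', y_test[:5])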
# Convert to TF Lite (mobile)
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
open('model.tflite', 'wb').write(tflite_model)

Going further:
- tf.data pipelines for efficient input
- @tf.function to speed up eager code
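Neither topic is expanded in this extract. @tf.function was already used above to wrap forward() and train_step(); below is a minimal, illustrative tf.data sketch on synthetic data (array names and sizes are made up here), showing the usual shuffle/batch/prefetch chain that can be passed straight to model.fit.

import numpy as np
import tensorflow as tf

X = np.random.randn(1000, 2).astype('float32')
y = (X[:, 0] + X[:, 1] > 0).astype('float32')

ds = (tf.data.Dataset.from_tensor_slices((X, y))
      .shuffle(1000)
      .batch(32)
      .prefetch(tf.data.AUTOTUNE))

for xb, yb in ds.take(1):
    print(xb.shape, yb.shape)   # (32, 2) (32,)

# A tf.data.Dataset can be passed directly to Keras:
# model.fit(ds, epochs=10)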