
Please add example on NUTS sampling from posterior for Bayesian Logistic Regression

Open dionman opened this issue 6 years ago • 3 comments

dionman avatar Oct 07 '19 10:10 dionman

"""Posterior inference with NUTS for Bayesian Logistic Regression.
"""
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability import edward2 as ed
import warnings
warnings.filterwarnings('ignore')
tfd = tfp.distributions

M = 100  # number of points
data_dim = 2
thetatrue = tf.convert_to_tensor(np.random.randn(data_dim), dtype=tf.float32) # true value of params

# Generate synthetic data
mu = np.array([0, 0])
cov = np.eye(2)
Xtrue = tf.convert_to_tensor(np.random.multivariate_normal(mu, cov, M), dtype=tf.float32)
Ytrue = ed.Bernoulli(logits=tf.tensordot(Xtrue, thetatrue, [[1], [0]]), name="Y")

# Materialize the synthetic data as numpy arrays
with tf.Session() as sess:
    Xtrue, thetatrue, Ytrue = sess.run([Xtrue, thetatrue, Ytrue])

# Generative model for logistic regression
def logistic_regression(X0):
    theta0 = ed.Normal(loc=tf.constant(0., dtype=tf.float32),
                       scale=tf.constant(1., dtype=tf.float32),
                       sample_shape=X0.shape[1], name="theta0")
    Y0 = ed.Bernoulli(logits=tf.tensordot(X0, theta0, [[1], [0]]), name="Y0")
    return Y0

log_joint = ed.make_log_joint_fn(logistic_regression)

inittheta = tf.convert_to_tensor(np.random.randn(data_dim), dtype=tf.float32) # set initial state for MCMC

def target_log_prob_fn(theta0):
        """Target log-probability as a function of states."""
        return log_joint(theta0=theta0, X0=Xtrue, Y0=Ytrue)

Nsamples = 1000 # final number of samples
Nburn = 1000     # number of tuning samples

# set up NUTS
mc_kernel = tfp.mcmc.NoUTurnSampler(
    target_log_prob_fn=target_log_prob_fn,
    step_size=0.01)
states, kernel_results = tfp.mcmc.sample_chain(
    num_results=Nsamples,
    current_state=[inittheta],
    kernel=mc_kernel,
    num_burnin_steps=Nburn)
thetas = states

# run the session to extract the samples
with tf.Session() as sess:
    thetas_, is_accepted_ = sess.run([thetas, kernel_results.is_accepted])
    accepted = np.sum(is_accepted_)

# Samples after burn-in
theta_samples = thetas_[0]
theta_samples_mean = np.mean(theta_samples, axis=0)
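
A quick sanity check one might append here (not part of the original snippet): report the acceptance rate and compare the posterior mean against the parameters used to generate the data.

# Hypothetical check: the posterior mean should be close to thetatrue for this setup.
print("acceptance rate:", accepted / Nsamples)
print("true theta:     ", thetatrue)
print("posterior mean: ", theta_samples_mean)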

The implementation above seems to work correctly, though NoUTurnSampler is noticeably slower than HamiltonianMonteCarlo (see the kernel comparison sketch below).

dionman avatar Oct 08 '19 13:10 dionman
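
For comparison, a minimal sketch of what an equivalent HamiltonianMonteCarlo kernel could look like with the same target_log_prob_fn; the step size and leapfrog count are illustrative guesses rather than tuned values:

# Drop-in HMC alternative to the NUTS kernel above (illustrative settings),
# run through tfp.mcmc.sample_chain and a session exactly as before.
hmc_kernel = tfp.mcmc.HamiltonianMonteCarlo(
    target_log_prob_fn=target_log_prob_fn,
    step_size=0.01,
    num_leapfrog_steps=10)
hmc_states, hmc_results = tfp.mcmc.sample_chain(
    num_results=Nsamples,
    current_state=[inittheta],
    kernel=hmc_kernel,
    num_burnin_steps=Nburn)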

Can anyone brief me on this issue? I really want to contribute and close it, since it's a very old issue. Just some tips would be enough to get going.

IMPranshu avatar Jan 18 '22 12:01 IMPranshu

I think the right way to do this would be to build a logistic regression model, very similar to the linear regression model here, using a joint distribution, and then use tfp.experimental.mcmc.windowed_adaptive_nuts to fit the model (a rough sketch along these lines is below).

ColCarroll avatar Jan 18 '22 15:01 ColCarroll
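
A rough, untested sketch of that suggestion in TF2-style TFP; the data generation, variable names, chain count, and adaptation settings below are illustrative assumptions, not a vetted implementation:

import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions

# Illustrative synthetic data.
ndims, npoints = 2, 100
X = tf.random.normal([npoints, ndims])
true_theta = tf.random.normal([ndims])
y_obs = tfd.Bernoulli(logits=tf.linalg.matvec(X, true_theta)).sample()

# Logistic regression written as a joint distribution, mirroring the
# linear regression example referenced above.
model = tfd.JointDistributionNamed(dict(
    theta=tfd.Sample(tfd.Normal(0., 1.), sample_shape=ndims),
    y=lambda theta: tfd.Independent(
        tfd.Bernoulli(logits=tf.linalg.matvec(X, theta)),
        reinterpreted_batch_ndims=1),
))

# Pin the observed labels and sample the posterior with windowed adaptive NUTS.
draws, trace = tfp.experimental.mcmc.windowed_adaptive_nuts(
    1000, model, n_chains=4, num_adaptation_steps=500, y=y_obs)
theta_draws = draws['theta']  # leading dimensions are draws and chains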