# Template for exercise 4.1

import numpy as np
import matplotlib.pyplot as plt

# Load the observations
data = np.loadtxt('ex4_1_data.txt')
x_obs = data[:, 0]
y_obs = data[:, 1]

N_train = 50
x_train = x_obs[:N_train]
y_train = y_obs[:N_train]

N_test = 10
x_test = x_obs[N_train:N_train+N_test]
y_test = y_obs[N_train:N_train+N_test]

x_range = (-5, 5)  # Possible values of x are in this range

# Basis function parameters
num_basis_functions = 11
centers = np.linspace(x_range[0], x_range[1], num_basis_functions)
lambdaval = 0.17  # You can assume here that the basis function centers and lambda are correct ...


def rbf(x, centers, lambdaval):
    # Radial basis function output for input x
    #
    # Inputs:
    #   x         : input points (one-dimensional array)
    #   centers   : basis function centers (one-dimensional array)
    #   lambdaval : basis function width (scalar)
    #
    # Output:
    #   radial basis functions evaluated at x (two-dimensional array with
    #   len(x) rows and len(centers) columns)
    d = x[:, np.newaxis] - centers[np.newaxis, :]
    y = np.exp(-0.5 * (d ** 2) / lambdaval)
    return y


def bayesian_linear_regression(phi_x, y, alpha, beta):
    # Bayesian linear parameter model
    #
    # Inputs:
    #   phi_x : the basis functions applied to the x-data (two-dimensional array)
    #   y     : y-data (one-dimensional array)
    #   alpha : the precision of the weight prior distribution (scalar)
    #   beta  : the precision of the assumed Gaussian noise (scalar)
    #
    # Output:
    #   the posterior mean, the posterior covariance, and the log marginal
    #   likelihood
    N, B = phi_x.shape

    # Add here code to compute:
    #   m     = the posterior mean of w
    #   S     = the posterior covariance of w
    #   S_inv = the inverse of S
    S_inv = ???
    S = ???

    # Note: this is a corrected version of equation 18.1.19 from Barber's book
    d = beta * np.dot(phi_x.T, y)
    m = ???
    log_likelihood = 0.5 * (-beta * np.dot(y, y) + d @ S @ d
                            + np.log(np.linalg.det(2 * np.pi * S))
                            + B * np.log(alpha) + N * np.log(beta)
                            - N * np.log(2 * np.pi))

    return m, S, log_likelihood


# Specify possible values for the alpha and beta parameters to test
alphas = np.logspace(-3, 3, 100)
betas = np.logspace(-3, 3, 100)

# Grid search over possible values of alpha and beta
for alpha in alphas:
    for beta in betas:
        # Use the functions rbf and bayesian_linear_regression here to
        # compute the log marginal likelihood for the given alpha and beta

# What are the optimal values of alpha and beta that maximize the marginal
# likelihood?
best_alpha = ???
best_beta = ???

# Fit the model one more time, using the optimal alpha and beta and the
# training data, to get m for the optimal model
best_m = ???

# Compute the final regression function
x_coord = np.linspace(x_range[0], x_range[1], 100)
# Compute the predicted values for the inputs in x_coord using best_m
y_mean = ???

# Plot the final learned regression function together with the samples
plt.plot(x_coord, y_mean, label="learned model")
plt.plot(x_train, y_train, 'kx', label="training data")
plt.plot(x_test, y_test, 'rx', label="testing data")

# Make predictions for the inputs in the test data, so that you get
# predictions 'y_pred' for the inputs in x_test
y_pred = ???

# Plot the predictions
plt.plot(x_test, y_pred, 'gx', label="testing predictions")

# Compute the mean squared prediction error for the test data
mse_test = ???

plt.legend()
plt.title("ML-II: $\\alpha$=%.3f, $\\beta$=%.3f, mse=%.4f" % (best_alpha, best_beta, mse_test))
plt.show()
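
# ----------------------------------------------------------------------
# Sketch 1: one way to fill in the posterior computation. This is NOT
# part of the template and not the official solution; it assumes the
# standard conjugate model (zero-mean Gaussian prior on w with precision
# alpha, Gaussian observation noise with precision beta), for which
#     S^{-1} = alpha*I + beta*Phi^T Phi   and   m = beta * S Phi^T y.
# The function name bayesian_linear_regression_sketch is hypothetical,
# used only to keep the template's blanks intact.
# ----------------------------------------------------------------------
def bayesian_linear_regression_sketch(phi_x, y, alpha, beta):
    # Same interface and outputs as bayesian_linear_regression above
    N, B = phi_x.shape
    # Posterior precision of w (assumed conjugate-model formula)
    S_inv = alpha * np.eye(B) + beta * phi_x.T @ phi_x
    # Posterior covariance of w
    S = np.linalg.inv(S_inv)
    # Posterior mean of w: m = beta * S @ Phi^T y = S @ d
    d = beta * np.dot(phi_x.T, y)
    m = S @ d
    # Log marginal likelihood, exactly as in the template above
    log_likelihood = 0.5 * (-beta * np.dot(y, y) + d @ S @ d
                            + np.log(np.linalg.det(2 * np.pi * S))
                            + B * np.log(alpha) + N * np.log(beta)
                            - N * np.log(2 * np.pi))
    return m, S, log_likelihood
# np.linalg.solve(S_inv, d) would be numerically preferable to an
# explicit inverse, but the likelihood term above needs S itself, and
# with only 11 basis functions the inverse is harmless.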
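
# ----------------------------------------------------------------------
# Sketch 2: a minimal version of the grid search, assuming the sketch
# function above (or a completed bayesian_linear_regression). The helper
# names phi_train, log_liks, i_best and j_best are illustrative and not
# part of the template.
# ----------------------------------------------------------------------
phi_train = rbf(x_train, centers, lambdaval)
log_liks = np.zeros((len(alphas), len(betas)))
for i, alpha in enumerate(alphas):
    for j, beta in enumerate(betas):
        # Log marginal likelihood of the training data for this (alpha, beta)
        _, _, log_liks[i, j] = bayesian_linear_regression_sketch(
            phi_train, y_train, alpha, beta)
# The ML-II estimate is the grid point maximizing the marginal likelihood
i_best, j_best = np.unravel_index(np.argmax(log_liks), log_liks.shape)
best_alpha = alphas[i_best]
best_beta = betas[j_best]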
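
# ----------------------------------------------------------------------
# Sketch 3: the remaining blanks under the same assumptions: refit at
# (best_alpha, best_beta), then predict with the posterior mean weights.
# ----------------------------------------------------------------------
best_m, best_S, _ = bayesian_linear_regression_sketch(
    phi_train, y_train, best_alpha, best_beta)
# The posterior-mean prediction is linear in the basis function outputs
y_mean = rbf(x_coord, centers, lambdaval) @ best_m
y_pred = rbf(x_test, centers, lambdaval) @ best_m
# Mean squared prediction error on the held-out test points
mse_test = np.mean((y_test - y_pred) ** 2)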