Stat 154 Hw 2

Code Chunks:

Q1)

import numpy as np
from sklearn.datasets import load_boston  # deprecated in scikit-learn 1.0, removed in 1.2

boston = load_boston()
X, y = boston.data, boston.target

features = [0, 3, 2, 12, 9, 5]  # Indices of the selected features
X = X[:, features]              # Selecting the features
y = y.reshape(-1, 1)            # Reshaping y to make it a column vector

# Append a column of ones to X for the intercept term
X = np.hstack((np.ones((X.shape[0], 1)), X))

# Closed-form least squares estimate from the normal equations
beta_hat = np.linalg.inv(X.T @ X) @ X.T @ y

# Predicted values and residual sum of squares
y_pred = X @ beta_hat
RSS = np.linalg.norm(y - y_pred)**2

print("Beta Hat (Estimated Coefficients):\n", beta_hat)
print("RSS:", RSS)
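Note that load_boston was removed in scikit-learn 1.2, so the chunks in this report need an older scikit-learn to run as written. scikit-learn's deprecation notice pointed to the dataset's original home at CMU; a minimal replacement loader based on that notice (it additionally assumes pandas, which this report does not otherwise use) is:

import numpy as np
import pandas as pd

# Fetch the raw Boston housing data from its original source, as suggested
# by scikit-learn's deprecation notice. The file stores each record across
# two physical lines: 11 values on the first, 3 on the second.
data_url = "http://lib.stat.cmu.edu/datasets/boston"
raw_df = pd.read_csv(data_url, sep=r"\s+", skiprows=22, header=None)
data = np.hstack([raw_df.values[::2, :], raw_df.values[1::2, :2]])
target = raw_df.values[1::2, 2]

With this, data and target can stand in for boston.data and boston.target in each chunk.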
Q2)

import numpy as np
from scipy.linalg import solve_triangular
from sklearn.datasets import load_boston

boston = load_boston()
X, y = boston.data, boston.target

features = [0, 3, 2, 12, 9, 5]  # Indices of the selected features
X = X[:, features]              # Selecting the features
y = y.reshape(-1, 1)            # Reshaping y to make it a column vector

# Append a column of ones to X for the intercept term
X = np.hstack((np.ones((X.shape[0], 1)), X))

# Reduced QR decomposition: X = QR with Q orthonormal, R upper triangular
Q, R = np.linalg.qr(X)

# Transform the target variable
y_transformed = Q.T @ y

# Backward substitution solves R u = Q^T y for u; solve_triangular
# defaults to an upper-triangular system, which matches R
u = solve_triangular(R, y_transformed)

# The least squares estimator beta_hat_qr
beta_hat_qr = u

# Predicted values
y_pred_qr = X @ beta_hat_qr
RSS_qr = np.linalg.norm(y - y_pred_qr)**2

print("Beta Hat QR (Least Squares Estimator):\n", beta_hat_qr)
print("RSS with QR Decomposition and Backward Substitution:", RSS_qr)
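The point of the QR route is numerical stability: forming X^T X as in Q1 roughly squares the condition number of X, while QR factors X directly. As a quick sanity check (not part of the original submission, and assuming X, y, beta_hat, and beta_hat_qr from the chunks above are still in scope), all three solvers should agree to high precision:

# Compare the normal-equations and QR solutions against numpy's built-in
# least squares solver.
beta_lstsq, *_ = np.linalg.lstsq(X, y, rcond=None)
print(np.allclose(beta_hat, beta_hat_qr))    # normal equations vs. QR
print(np.allclose(beta_hat_qr, beta_lstsq))  # QR vs. np.linalg.lstsq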
Q3)

import numpy as np
from sklearn.datasets import load_boston

boston = load_boston()
X, y = boston.data, boston.target

features = [0, 3, 2, 12, 9, 5]  # Indices of the selected features
X = X[:, features]              # Selecting the features
y = y.reshape(-1, 1)            # Reshaping y to make it a column vector

# Append a column of ones to X for the intercept term
X = np.hstack((np.ones((X.shape[0], 1)), X))

# Initialize coefficients with random values (zeros would also work)
np.random.seed(0)
beta_gd = np.random.randn(X.shape[1], 1)

alpha = 0.0000001              # Learning rate
convergence_threshold = 0.001  # Stop once the gradient norm is this small
max_iterations = 1000

# Gradient descent on RSS(beta) = ||y - X beta||^2
for i in range(max_iterations):
    gradient = 2 * X.T @ (X @ beta_gd - y)  # Gradient of the RSS
    beta_gd -= alpha * gradient             # Update the coefficients
    if np.linalg.norm(gradient) < convergence_threshold:
        break  # Converged

# Predicted values
y_pred_gd = X @ beta_gd
RSS_gd = np.linalg.norm(y - y_pred_gd)**2

print("Beta Hat GD (Least Squares Estimator):\n", beta_gd)
print("RSS with Gradient Descent:", RSS_gd)
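The hand-picked learning rate of 1e-7 and cap of 1,000 iterations will typically stop well short of the exact least squares solution, so RSS_gd should be expected to exceed the RSS from Q1 and Q2. A standard alternative, sketched below on the same X and y (again, not part of the original submission): since the Hessian of the RSS is 2 X^T X, the gradient is Lipschitz with constant L = 2 lambda_max(X^T X), and a fixed step of 1/L guarantees the objective decreases at every iteration. Convergence can still be slow when X^T X is ill-conditioned; standardizing the features first helps.

# Curvature-based step size for gradient descent on the same objective.
# Assumes X and y from the chunks above are in scope.
L = 2 * np.linalg.eigvalsh(X.T @ X).max()  # Lipschitz constant of the gradient
alpha_safe = 1.0 / L

beta = np.zeros((X.shape[1], 1))
for i in range(100000):
    gradient = 2 * X.T @ (X @ beta - y)
    beta -= alpha_safe * gradient
    if np.linalg.norm(gradient) < 0.001:
        break
print("RSS with curvature-based step size:", np.linalg.norm(y - X @ beta)**2)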