Coursera: Machine Learning - Andrew Ng (Week 2) [Assignment Solution]

These solutions are for reference only. Try to solve each exercise on your
own first; if you get stuck along the way, you can refer to these solutions.

--------------------------------------------------------------------

function A = warmUpExercise()
% In Octave, a function declared as [y1,...,yN] = myfun(x1,...,xN) takes
% the inputs x1,...,xN and returns the outputs y1,...,yN.
% WARMUPEXERCISE() returns the 5x5 identity matrix.

A = eye(5);
end
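
A quick optional sanity check: calling the function from the Octave prompt
without a semicolon should display the 5x5 identity matrix.

% Usage sketch for warmUpExercise (not part of the graded code)
A = warmUpExercise()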



function plotData(x, y)
%PLOTDATA Plots the data points x and y into a new figure 

figure; % open a new figure window before plotting

% Hint: the 'rx' option makes the markers appear as red crosses, and
% 'MarkerSize', 10 makes them larger.
plot(x, y, 'rx', 'MarkerSize', 10); % plot the training data

ylabel('Profit in $10,000s');            % set the y-axis label
xlabel('Population of City in 10,000s'); % set the x-axis label

end
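
To try plotData out, the single-variable dataset can be loaded and passed in
directly. The file name ex1data1.txt below is the one used in the course's
ex1 folder; adjust it if your copy differs.

% Usage sketch for plotData (assumes ex1data1.txt is on the Octave path)
data = load('ex1data1.txt'); % column 1: population, column 2: profit
x = data(:, 1);
y = data(:, 2);
plotData(x, y);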



function J = computeCost(X, y, theta)
% J = COMPUTECOST(X, y, theta) computes the cost for linear regression 
% using theta as the parameter for linear regression to fit the data 
% points in X and y

m = length(y); % number of training examples

i = 1:m;
J = (1/(2*m)) * sum( ((theta(1) + theta(2) .* X(i,2)) - y(i)) .^ 2); % Un-Vectorized

end
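
The version above is written out element-wise for the single-feature case.
For comparison, here is a fully vectorized sketch that works for any number
of features; the name computeCostVec is just illustrative and is not part of
the graded submission.

function J = computeCostVec(X, y, theta)
% Vectorized cost: J = (1/(2*m)) * sum((X*theta - y).^2)
m = length(y); % number of training examples
J = (1/(2*m)) * sum((X * theta - y) .^ 2);
end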



function [theta, J_history] = gradientDescent(X, y, theta, alpha, num_iters)
% theta = GRADIENTDESCENT(X, y, theta, alpha, num_iters) updates theta by 
% taking num_iters gradient steps with learning rate alpha

m = length(y); % number of training examples
J_history = zeros(num_iters, 1);

for iter = 1:num_iters
    
    k = 1:m;
    t1 = sum((theta(1) + theta(2) .* X(k,2)) - y(k)); % Un-Vectorized
    t2 = sum(((theta(1) + theta(2) .* X(k,2)) - y(k)) .* X(k,2)); % Un-Vectorized
    
    theta(1) = theta(1) - (alpha/m) * (t1);
    theta(2) = theta(2) - (alpha/m) * (t2);
    
    % Save the cost J in every iteration    
    J_history(iter) = computeCost(X, y, theta);

end

end
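
A minimal way to run gradientDescent on the single-variable data is sketched
below. The learning rate 0.01 and the 1500 iterations match the values
suggested in the course's ex1.m driver script; treat them as defaults to
tune, not as the only valid choice.

% Usage sketch for gradientDescent (assumes ex1data1.txt is on the path)
data = load('ex1data1.txt');
X = [ones(size(data, 1), 1), data(:, 1)]; % add a column of ones (intercept term)
y = data(:, 2);
theta = zeros(2, 1);                      % initialize fitting parameters
[theta, J_history] = gradientDescent(X, y, theta, 0.01, 1500);
fprintf('theta found by gradient descent: %f %f\n', theta(1), theta(2));
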
function J = computeCostMulti(X, y, theta)
% J = COMPUTECOSTMULTI(X, y, theta) computes the cost of using theta as the
% parameter for linear regression to fit the data points in X and y

m = length(y); % number of training examples

J = (1/(2*m)) * (X * theta - y)' * (X * theta - y); % Vectorized

end
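
A tiny hand-checkable example (values chosen purely for illustration, not
taken from the assignment data): with theta = [0; 1] the hypothesis X*theta
reproduces y exactly, so the cost should come out as 0.

% Worked example for computeCostMulti
X = [1 1; 1 2];   % two examples: intercept column plus one feature
y = [1; 2];
theta = [0; 1];
J = computeCostMulti(X, y, theta) % expected: 0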




function [theta, J_history] = gradientDescentMulti(X, y, theta, alpha, num_iters)
% theta = GRADIENTDESCENTMULTI(X, y, theta, alpha, num_iters) updates theta by
% taking num_iters gradient steps with learning rate alpha

m = length(y); % number of training examples
J_history = zeros(num_iters, 1);

for iter = 1:num_iters
    theta = theta - alpha * (1/m) * (((X*theta) - y)' * X)'; % Vectorized  
    J_history(iter) = computeCostMulti(X, y, theta);
end

end
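
One practical check (not required by the grader): with a well-chosen alpha
the entries of J_history should decrease on every iteration, so plotting the
cost against the iteration number is a quick way to debug the learning rate.

% Convergence-check sketch: run after gradientDescentMulti has returned J_history
plot(1:numel(J_history), J_history, '-b', 'LineWidth', 2);
xlabel('Number of iterations');
ylabel('Cost J');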



function [X_norm, mu, sigma] = featureNormalize(X)
% FEATURENORMALIZE(X) returns a normalized version of X where
% the mean value of each feature is 0 and the standard deviation
% is 1. This is often a good preprocessing step to do when
% working with learning algorithms.

mu = mean(X);
sigma = std(X);

t = ones(size(X, 1), 1); % column of ones, one entry per training example
X_norm = (X - (t * mu)) ./ (t * sigma); % Vectorized

end
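
Putting the pieces together for the multi-variable data: normalize first,
then add the intercept column, run gradient descent, and apply the SAME mu
and sigma to any new example before predicting. The file name ex1data2.txt,
the 0.01 / 400 settings, and the 1650 sq-ft, 3-bedroom query below follow
the course exercise, but treat them as illustrative values.

% End-to-end sketch for the multi-variable case
data = load('ex1data2.txt');
X = data(:, 1:2);                              % house size, number of bedrooms
y = data(:, 3);                                % house price
[X_norm, mu, sigma] = featureNormalize(X);
X_norm = [ones(size(X_norm, 1), 1), X_norm];   % add intercept after normalizing
theta = zeros(3, 1);
[theta, J_history] = gradientDescentMulti(X_norm, y, theta, 0.01, 400);

% Predict the price of a 1650 sq-ft, 3-bedroom house
x_query = ([1650, 3] - mu) ./ sigma;           % normalize with the stored mu, sigma
price = [1, x_query] * theta;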



function [theta] = normalEqn(X, y)
%   NORMALEQN(X,y) computes the closed-form solution to linear 
%   regression using the normal equations.

theta = pinv(X' * X) * (X' * y); % Vectorized

end
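
The normal equation needs no feature scaling, learning rate, or iteration
count, so it is a handy cross-check on the gradient-descent result. A sketch
using the same (assumed) ex1data2.txt file:

% Usage sketch for normalEqn on the raw, un-normalized features
data = load('ex1data2.txt');
X = [ones(size(data, 1), 1), data(:, 1:2)];
y = data(:, 3);
theta = normalEqn(X, y);
price = [1, 1650, 3] * theta; % should closely match the gradient-descent prediction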

