-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathimplementations.py
More file actions
141 lines (102 loc) · 3.69 KB
/
implementations.py
File metadata and controls
141 lines (102 loc) · 3.69 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
# -*- coding: utf-8 -*-
"""
Project 1 method implementations.
Authors: Victor Faramond, Dario Anongba Varela, Mathieu Schopfer
"""
import numpy as np
from costs import compute_loss, compute_loss_neg_log_likelihood
from helpers import compute_gradient, batch_iter, sigmoid
def least_squares_gd(y, tx, initial_w, max_iters, gamma):
    """Linear regression using full-batch gradient descent.

    Args:
        y: target vector, shape (N,).
        tx: feature matrix, shape (N, D).
        initial_w: starting weights of shape (D,), or None for a zero vector.
        max_iters: number of gradient-descent steps.
        gamma: step size.

    Returns:
        (w, loss): the final weight vector and its loss under compute_loss.
    """
    # Default to a zero starting point when none is supplied.
    if initial_w is None:
        initial_w = np.zeros(tx.shape[1])
    # Work on a copy: the original `w -= gamma * gradient` updated the
    # caller's `initial_w` ndarray in place.
    w = np.array(initial_w, dtype=float)
    for _ in range(max_iters):
        gradient = compute_gradient(y, tx, w)
        # Out-of-place step keeps `initial_w` untouched.
        w = w - gamma * gradient
    # Report the loss of the weights actually returned (the original returned
    # the loss computed *before* the last update, and 0 when max_iters == 0).
    loss = compute_loss(y, tx, w)
    return w, loss
def least_squares_sgd(y, tx, initial_w, max_iters, gamma):
    """Linear regression using stochastic gradient descent (batch size 1).

    Args:
        y: target vector, shape (N,).
        tx: feature matrix, shape (N, D).
        initial_w: starting weights of shape (D,), or None for a zero vector.
        max_iters: number of mini-batches drawn from batch_iter.
        gamma: step size.

    Returns:
        (w, loss): the final weight vector and its full-dataset loss.
    """
    # Default to a zero starting point when none is supplied.
    if initial_w is None:
        initial_w = np.zeros(tx.shape[1])
    batch_size = 1
    # Work on a copy: the original `w -= gamma * gradient` updated the
    # caller's `initial_w` ndarray in place.
    w = np.array(initial_w, dtype=float)
    for mb_y, mb_tx in batch_iter(y, tx, batch_size, max_iters):
        # Gradient on the mini-batch only; that is the "stochastic" part.
        gradient = compute_gradient(mb_y, mb_tx, w)
        w = w - gamma * gradient
    # Compute the full-dataset loss once at the end, for the returned weights.
    # (The original recomputed it every iteration, discarded all but the last,
    # and that last value predated the final weight update.)
    loss = compute_loss(y, tx, w)
    return w, loss
def least_squares(y, tx):
    """Least squares regression using the normal equations.

    Solves (tx^T tx) w = tx^T y directly.  Uses np.linalg.solve instead of
    forming an explicit inverse: it is faster and numerically more stable
    (the original did inv(tx^T tx) @ tx^T @ y).

    Args:
        y: target vector, shape (N,).
        tx: feature matrix, shape (N, D).

    Returns:
        (w, loss): the least-squares weights and their loss under compute_loss.
    """
    gram = tx.T.dot(tx)
    w = np.linalg.solve(gram, tx.T.dot(y))
    loss = compute_loss(y, tx, w)
    return w, loss
def ridge_regression(y, tx, lambda_):
    """Ridge regression using the normal equations.

    Solves (tx^T tx + lambda' I) w = tx^T y with lambda' = 2 N lambda_
    (the 2N factor matches the project's loss normalization).  Uses
    np.linalg.solve instead of an explicit matrix inverse for speed and
    numerical stability.

    Args:
        y: target vector, shape (N,).
        tx: feature matrix, shape (N, D).
        lambda_: regularization strength.

    Returns:
        (w, loss): the ridge weights and their (unpenalized) compute_loss value.
    """
    lambd = lambda_ * 2 * len(y)
    a = tx.T.dot(tx) + lambd * np.eye(tx.shape[1])
    w = np.linalg.solve(a, tx.T.dot(y))
    loss = compute_loss(y, tx, w)
    return w, loss
def learning_by_gradient_descent(y, tx, w, gamma):
    """Do one gradient-descent step for logistic regression.

    Args:
        y: binary target vector in {0, 1}, shape (N,).
        tx: feature matrix, shape (N, D).
        w: current weight vector, shape (D,).
        gamma: step size.

    Returns:
        (w_new, loss): the updated weights and the negative log-likelihood
        evaluated at the *incoming* w.
    """
    loss = compute_loss_neg_log_likelihood(y, tx, w)
    # Gradient of the negative log-likelihood: tx^T (sigmoid(tx w) - y).
    gradient = tx.T.dot(sigmoid(tx.dot(w)) - y)
    # Out-of-place update: the original `w -= ...` mutated the caller's
    # array in place, clobbering e.g. `initial_w` in the drivers above.
    w = w - gamma * gradient
    return w, loss
def logistic_regression(y, tx, initial_w, max_iters, gamma):
    """Logistic regression trained by gradient descent.

    Labels are remapped from {-1, 1} to {0, 1} before training.  Iteration
    stops early once the loss improves by less than `threshold`.

    Args:
        y: target vector in {-1, 1}, shape (N,).
        tx: feature matrix, shape (N, D).
        initial_w: starting weights of shape (D,), or None for a zero vector.
        max_iters: maximum number of gradient-descent steps.
        gamma: step size.

    Returns:
        (w, loss): final weights and the last negative log-likelihood.
    """
    if initial_w is None:
        initial_w = np.zeros(tx.shape[1])
    # Copy so the helper's update never touches the caller's array.
    w = np.array(initial_w, dtype=float)
    # Map labels from {-1, 1} to {0, 1} as the log-likelihood expects.
    y = (1 + y) / 2
    threshold = 0.1
    # Pre-compute the loss at the starting point so `max_iters == 0` returns
    # a defined value (the original raised NameError on `loss` in that case).
    loss = compute_loss_neg_log_likelihood(y, tx, w)
    losses = []
    for _ in range(max_iters):
        w, loss = learning_by_gradient_descent(y, tx, w, gamma)
        losses.append(loss)
        # Convergence criterion: stop when the loss change is small.
        if len(losses) > 1 and abs(losses[-1] - losses[-2]) < threshold:
            break
    return w, loss
def reg_logistic_regression(y, tx, lambda_, initial_w, max_iters, gamma):
    """Regularized logistic regression trained by gradient descent.

    Labels are remapped from {-1, 1} to {0, 1} before training.  The
    reported cost is the negative log-likelihood plus an L2 penalty.

    NOTE(review): the descent step itself is the unpenalized one (identical
    to `logistic_regression`); only the returned cost carries the L2 term.
    Confirm against the project spec whether the gradient should also be
    regularized.

    Args:
        y: target vector in {-1, 1}, shape (N,).
        tx: feature matrix, shape (N, D).
        lambda_: regularization strength.
        initial_w: starting weights of shape (D,), or None for a zero vector.
        max_iters: maximum number of gradient-descent steps.
        gamma: step size.

    Returns:
        (w, cost): final weights and the penalized scalar cost.
    """
    if initial_w is None:
        initial_w = np.zeros(tx.shape[1])
    # Copy so the helper's update never touches the caller's array.
    w = np.array(initial_w, dtype=float)
    # Map labels from {-1, 1} to {0, 1} as the log-likelihood expects.
    y = (1 + y) / 2
    threshold = 0.1
    # Defined starting loss so `max_iters == 0` still yields a valid cost.
    loss = compute_loss_neg_log_likelihood(y, tx, w)
    losses = []
    for _ in range(max_iters):
        w, loss = learning_by_gradient_descent(y, tx, w, gamma)
        losses.append(loss)
        # Convergence criterion: stop when the loss change is small.
        if len(losses) > 1 and abs(losses[-1] - losses[-2]) < threshold:
            break
    # L2 penalty on the weights.  Bug fix: the original computed
    # `w + penalty`, adding the weight *vector* to the scalar penalty and
    # returning a vector as "cost"; the intended cost is loss + penalty.
    norm = np.sum(w ** 2)
    cost = loss + lambda_ * norm / (2 * np.shape(w)[0])
    return w, cost