# OverFitting.py
import d2lzh as d2l
from mxnet import autograd, gluon, nd
from mxnet.gluon import data as gdata, loss as gloss, nn
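
# All three experiments below share the same ground truth: a third-order
# polynomial with Gaussian noise,
#   y = 1.2*x - 3.4*x^2 + 5.6*x^3 + 5 + eps,  eps ~ N(0, 0.1^2),
# and differ only in which features (model capacity) and how much training
# data are handed to fit_and_plot().
#
# Optional: for runs that are reproducible across invocations, MXNet's global
# RNG can be seeded before the data is generated (the seed value here is an
# arbitrary choice, not part of the original script):
# import mxnet as mx
# mx.random.seed(42)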
def GoodFitting():  # Good fit: third-order polynomial features, 100 training samples
    n_train, n_test, true_w, true_b = 100, 100, [1.2, -3.4, 5.6], 5
    features = nd.random.normal(shape=(n_train + n_test, 1))
    poly_features = nd.concat(features, nd.power(features, 2),
                              nd.power(features, 3))
    labels = (true_w[0] * poly_features[:, 0] + true_w[1] * poly_features[:, 1]
              + true_w[2] * poly_features[:, 2] + true_b)
    labels += nd.random.normal(scale=0.1, shape=labels.shape)
    # Model capacity matches the data-generating process, so both losses stay low.
    fit_and_plot(poly_features[:n_train, :], poly_features[n_train:, :],
                 labels[:n_train], labels[n_train:])
def UnderFitting():  # Underfitting: first-order (linear) features only
    n_train, n_test, true_w, true_b = 100, 100, [1.2, -3.4, 5.6], 5
    features = nd.random.normal(shape=(n_train + n_test, 1))
    poly_features = nd.concat(features, nd.power(features, 2),
                              nd.power(features, 3))
    labels = (true_w[0] * poly_features[:, 0] + true_w[1] * poly_features[:, 1]
              + true_w[2] * poly_features[:, 2] + true_b)
    labels += nd.random.normal(scale=0.1, shape=labels.shape)
    # A linear model cannot capture the cubic relationship, so both the
    # training and test losses remain high.
    fit_and_plot(features[:n_train, :], features[n_train:, :],
                 labels[:n_train], labels[n_train:])
def OverFitting():  # Overfitting: linear features, but only 2 training samples
    n_train, n_test, true_w, true_b = 100, 100, [1.2, -3.4, 5.6], 5
    features = nd.random.normal(shape=(n_train + n_test, 1))
    poly_features = nd.concat(features, nd.power(features, 2),
                              nd.power(features, 3))
    labels = (true_w[0] * poly_features[:, 0] + true_w[1] * poly_features[:, 1]
              + true_w[2] * poly_features[:, 2] + true_b)
    labels += nd.random.normal(scale=0.1, shape=labels.shape)
    # With as many parameters as training samples, the model can fit the two
    # points almost exactly: the training loss collapses while the test loss
    # stays large.
    fit_and_plot(features[0:2, :], features[n_train:, :],
                 labels[0:2], labels[n_train:])
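
# A related variant worth comparing: keep the full third-order polynomial
# features but still train on only two samples (as far as we recall, this is
# the form the overfitting demo takes in the d2l book). The function name
# OverFittingCubic is our own label for this sketch, not from the original file.
def OverFittingCubic():
    n_train, n_test, true_w, true_b = 100, 100, [1.2, -3.4, 5.6], 5
    features = nd.random.normal(shape=(n_train + n_test, 1))
    poly_features = nd.concat(features, nd.power(features, 2),
                              nd.power(features, 3))
    labels = (true_w[0] * poly_features[:, 0] + true_w[1] * poly_features[:, 1]
              + true_w[2] * poly_features[:, 2] + true_b)
    labels += nd.random.normal(scale=0.1, shape=labels.shape)
    fit_and_plot(poly_features[0:2, :], poly_features[n_train:, :],
                 labels[0:2], labels[n_train:])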
def main():
    # Uncomment to run the other experiments.
    #GoodFitting()
    #UnderFitting()
    OverFitting()
def semilogy(x_vals, y_vals, x_label, y_label, x2_vals=None, y2_vals=None,
             legend=None, figsize=(3.5, 2.5)):
    # Plot one or two curves with a logarithmic y-axis, which suits loss
    # curves that span several orders of magnitude.
    d2l.set_figsize(figsize)
    d2l.plt.xlabel(x_label)
    d2l.plt.ylabel(y_label)
    d2l.plt.semilogy(x_vals, y_vals)
    if x2_vals and y2_vals:
        d2l.plt.semilogy(x2_vals, y2_vals, linestyle=':')
        d2l.plt.legend(legend)
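
# Example call (the loss values are illustrative, not from a real run):
# semilogy(range(1, 4), [1.0, 0.5, 0.25], 'epochs', 'loss',
#          range(1, 4), [1.2, 0.9, 0.8], ['train', 'test'])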
def fit_and_plot(train_features, test_features, train_labels, test_labels):
    num_epochs, loss = 100, gloss.L2Loss()
    net = nn.Sequential()
    net.add(nn.Dense(1))  # single linear output layer; capacity comes from the features
    net.initialize()
    batch_size = min(10, train_labels.shape[0])
    train_iter = gdata.DataLoader(gdata.ArrayDataset(
        train_features, train_labels), batch_size, shuffle=True)
    trainer = gluon.Trainer(net.collect_params(), 'sgd',
                            {'learning_rate': 0.01})
    train_ls, test_ls = [], []
    for _ in range(num_epochs):
        for x, y in train_iter:
            with autograd.record():
                l = loss(net(x), y)
            l.backward()
            trainer.step(batch_size)
        # Record the full-dataset losses once per epoch.
        train_ls.append(loss(net(train_features),
                             train_labels).mean().asscalar())
        test_ls.append(loss(net(test_features),
                            test_labels).mean().asscalar())
    print('final epoch: train loss', train_ls[-1], 'test loss', test_ls[-1])
    semilogy(range(1, num_epochs + 1), train_ls, 'epochs', 'loss',
             range(1, num_epochs + 1), test_ls, ['train', 'test'])
    print('weight:', net[0].weight.data().asnumpy(),
          '\nbias:', net[0].bias.data().asnumpy())
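
# Expected qualitative outcomes (standard bias/variance reasoning, not
# measured output from this script):
# - GoodFitting:  train and test losses both low; the learned weight/bias
#                 approach true_w = [1.2, -3.4, 5.6] and true_b = 5.
# - UnderFitting: train and test losses both stay high (model too simple).
# - OverFitting:  train loss near zero, test loss large (too little data).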
if __name__ == '__main__':
main()