
Commit edf393e

qoocrabkkweon authored and committed
Update Lab-02, Lab-03 (hunkim#240)
* Update lab-03-X-minimizing_cost_tf_gradient.py
  1. The value of W changes because of 'apply_gradients', not because of 'gradient'; there is no need to print W twice. (A minimal sketch of this pattern follows the change summary below.)
  2. Updated the sample-output comment.
  3. 'train = optimizer.minimize(cost)' is an unnecessary statement.
* Reformat the code for readability (Black formatting).
1 parent 931b1c3 commit edf393e

7 files changed: +134 -138 lines
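The point in item 1 of the commit message rests on a TF1 optimizer API distinction: optimizer.compute_gradients only builds and evaluates gradient tensors, while optimizer.apply_gradients is the op that actually assigns the new value to W, so one fetch of W after the update is enough. The following is a minimal sketch of that pattern, assuming the standard TF1 GradientDescentOptimizer API; the toy model and the names gvs and apply_gradients are illustrative, since the lab-03-X diff itself is not reproduced in this listing.

import tensorflow as tf

# Toy model: learn W so that hypothesis = X * W fits Y.
X = [1.0, 2.0, 3.0]
Y = [1.0, 2.0, 3.0]
W = tf.Variable(5.0)

hypothesis = X * W
cost = tf.reduce_mean(tf.square(hypothesis - Y))

optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
# compute_gradients only builds (gradient, variable) pairs; it changes nothing.
gvs = optimizer.compute_gradients(cost, var_list=[W])
# apply_gradients is the op that actually updates W when it is run.
apply_gradients = optimizer.apply_gradients(gvs)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(5):
        # Fetch the gradient value and run the update in one graph execution.
        grad_val, _ = sess.run([gvs[0][0], apply_gradients])
        # W has just been updated by apply_gradients, so a single fetch suffices.
        print(step, grad_val, sess.run(W))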

lab-02-1-linear_regression.py (+15 -19)
@@ -9,41 +9,37 @@
 # Try to find values for W and b to compute y_data = x_data * W + b
 # We know that W should be 1 and b should be 0
 # But let TensorFlow figure it out
-W = tf.Variable(tf.random_normal([1]), name='weight')
-b = tf.Variable(tf.random_normal([1]), name='bias')
+W = tf.Variable(tf.random_normal([1]), name="weight")
+b = tf.Variable(tf.random_normal([1]), name="bias")
 
 # Our hypothesis XW+b
 hypothesis = x_train * W + b
 
 # cost/loss function
 cost = tf.reduce_mean(tf.square(hypothesis - y_train))
 
-# Minimize
-optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
-train = optimizer.minimize(cost)
+# optimizer
+train = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(cost)
 
 # Launch the graph in a session.
-sess = tf.Session()
-# Initializes global variables in the graph.
-sess.run(tf.global_variables_initializer())
+with tf.Session() as sess:
+    # Initializes global variables in the graph.
+    sess.run(tf.global_variables_initializer())
 
-# Fit the line
-for step in range(2001):
-    sess.run(train)
-    if step % 20 == 0:
-        print(step, sess.run(cost), sess.run(W), sess.run(b))
+    # Fit the line
+    for step in range(2001):
+        _, cost_val, W_val, b_val = sess.run([train, cost, W, b])
 
-# Learns best fit W:[ 1.], b:[ 0.]
+        if step % 20 == 0:
+            print(step, cost_val, W_val, b_val)
 
-'''
+# Learns best fit W:[ 1.], b:[ 0.]
+"""
 0 2.82329 [ 2.12867713] [-0.85235667]
 20 0.190351 [ 1.53392804] [-1.05059612]
 40 0.151357 [ 1.45725465] [-1.02391243]
 ...
-
-1920 1.77484e-05 [ 1.00489295] [-0.01112291]
-1940 1.61197e-05 [ 1.00466311] [-0.01060018]
 1960 1.46397e-05 [ 1.004444] [-0.01010205]
 1980 1.32962e-05 [ 1.00423515] [-0.00962736]
 2000 1.20761e-05 [ 1.00403607] [-0.00917497]
-'''
+"""

lab-02-2-linear_regression_feed.py (+52 -55)
@@ -3,8 +3,8 @@
 tf.set_random_seed(777)  # for reproducibility
 
 # Try to find values for W and b to compute Y = W * X + b
-W = tf.Variable(tf.random_normal([1]), name='weight')
-b = tf.Variable(tf.random_normal([1]), name='bias')
+W = tf.Variable(tf.random_normal([1]), name="weight")
+b = tf.Variable(tf.random_normal([1]), name="bias")
 
 # placeholders for a tensor that will be always fed using feed_dict
 # See http://stackoverflow.com/questions/36693740/
@@ -17,64 +17,61 @@
 # cost/loss function
 cost = tf.reduce_mean(tf.square(hypothesis - Y))
 
-# Minimize
-optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
-train = optimizer.minimize(cost)
+# optimizer
+train = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(cost)
 
 # Launch the graph in a session.
-sess = tf.Session()
-# Initializes global variables in the graph.
-sess.run(tf.global_variables_initializer())
+with tf.Session() as sess:
+    # Initializes global variables in the graph.
+    sess.run(tf.global_variables_initializer())
 
-# Fit the line
-for step in range(2001):
-    cost_val, W_val, b_val, _ = \
-        sess.run([cost, W, b, train],
-                 feed_dict={X: [1, 2, 3], Y: [1, 2, 3]})
-    if step % 20 == 0:
-        print(step, cost_val, W_val, b_val)
+    # Fit the line
+    for step in range(2001):
+        _, cost_val, W_val, b_val = sess.run(
+            [train, cost, W, b], feed_dict={X: [1, 2, 3], Y: [1, 2, 3]}
+        )
+        if step % 20 == 0:
+            print(step, cost_val, W_val, b_val)
 
-# Learns best fit W:[ 1.], b:[ 0]
-'''
-...
-1980 1.32962e-05 [ 1.00423515] [-0.00962736]
-2000 1.20761e-05 [ 1.00403607] [-0.00917497]
-'''
+    # Testing our model
+    print(sess.run(hypothesis, feed_dict={X: [5]}))
+    print(sess.run(hypothesis, feed_dict={X: [2.5]}))
+    print(sess.run(hypothesis, feed_dict={X: [1.5, 3.5]}))
 
-# Testing our model
-print(sess.run(hypothesis, feed_dict={X: [5]}))
-print(sess.run(hypothesis, feed_dict={X: [2.5]}))
-print(sess.run(hypothesis, feed_dict={X: [1.5, 3.5]}))
+    # Learns best fit W:[ 1.], b:[ 0]
+    """
+    0 3.5240757 [2.2086694] [-0.8204183]
+    20 0.19749963 [1.5425726] [-1.0498911]
+    ...
+    1980 1.3360998e-05 [1.0042454] [-0.00965055]
+    2000 1.21343355e-05 [1.0040458] [-0.00919707]
+    [5.0110054]
+    [2.500915]
+    [1.4968792 3.5049512]
+    """
 
-'''
-[ 5.0110054]
-[ 2.50091505]
-[ 1.49687922  3.50495124]
-'''
+    # Fit the line with new training data
+    for step in range(2001):
+        _, cost_val, W_val, b_val = sess.run(
+            [train, cost, W, b],
+            feed_dict={X: [1, 2, 3, 4, 5], Y: [2.1, 3.1, 4.1, 5.1, 6.1]},
+        )
+        if step % 20 == 0:
+            print(step, cost_val, W_val, b_val)
 
+    # Testing our model
+    print(sess.run(hypothesis, feed_dict={X: [5]}))
+    print(sess.run(hypothesis, feed_dict={X: [2.5]}))
+    print(sess.run(hypothesis, feed_dict={X: [1.5, 3.5]}))
 
-# Fit the line with new training data
-for step in range(2001):
-    cost_val, W_val, b_val, _ = \
-        sess.run([cost, W, b, train],
-                 feed_dict={X: [1, 2, 3, 4, 5],
-                            Y: [2.1, 3.1, 4.1, 5.1, 6.1]})
-    if step % 20 == 0:
-        print(step, cost_val, W_val, b_val)
-
-# Learns best fit W:[ 1.], b:[ 1.1]
-'''
-1980 2.90429e-07 [ 1.00034881] [ 1.09874094]
-2000 2.5373e-07 [ 1.00032604] [ 1.09882331]
-'''
-
-# Testing our model
-print(sess.run(hypothesis, feed_dict={X: [5]}))
-print(sess.run(hypothesis, feed_dict={X: [2.5]}))
-print(sess.run(hypothesis, feed_dict={X: [1.5, 3.5]}))
-
-'''
-[ 6.10045338]
-[ 3.59963846]
-[ 2.59931231  4.59996414]
-'''
+    # Learns best fit W:[ 1.], b:[ 1.1]
+    """
+    0 1.2035878 [1.0040361] [-0.00917497]
+    20 0.16904518 [1.2656431] [0.13599995]
+    ...
+    1980 2.9042917e-07 [1.00035] [1.0987366]
+    2000 2.5372992e-07 [1.0003271] [1.0988194]
+    [6.1004534]
+    [3.5996385]
+    [2.5993123 4.599964 ]
+    """
lab-02-3-linear_regression_tensorflow.org.py (+21 -18)
@@ -1,34 +1,37 @@
 # From https://www.tensorflow.org/get_started/get_started
 import tensorflow as tf
 
+# training data
+x_train = [1, 2, 3, 4]
+y_train = [0, -1, -2, -3]
+
 # Model parameters
-W = tf.Variable([.3], tf.float32)
-b = tf.Variable([-.3], tf.float32)
+W = tf.Variable([0.3], tf.float32)
+b = tf.Variable([-0.3], tf.float32)
 
 # Model input and output
 x = tf.placeholder(tf.float32)
 y = tf.placeholder(tf.float32)
 
-linear_model = x * W + b
+hypothesis = x * W + b
 
 # cost/loss function
-loss = tf.reduce_sum(tf.square(linear_model - y))  # sum of the squares
+cost = tf.reduce_sum(tf.square(hypothesis - y))  # sum of the squares
 
 # optimizer
-optimizer = tf.train.GradientDescentOptimizer(0.01)
-train = optimizer.minimize(loss)
+train = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(cost)
 
-# training data
-x_train = [1, 2, 3, 4]
-y_train = [0, -1, -2, -3]
+# training
+with tf.Session() as sess:
+    sess.run(tf.global_variables_initializer())
+
+    for step in range(1000):
+        sess.run(train, {x: x_train, y: y_train})
 
-# training loop
-init = tf.global_variables_initializer()
-sess = tf.Session()
-sess.run(init)  # reset values to wrong
-for i in range(1000):
-    sess.run(train, {x: x_train, y: y_train})
+    # evaluate training accuracy
+    W_val, b_val, cost_val = sess.run([W, b, cost], feed_dict={x: x_train, y: y_train})
+    print(f"W: {W_val} b: {b_val} cost: {cost_val}")
 
-# evaluate training accuracy
-curr_W, curr_b, curr_loss = sess.run([W, b, loss], {x: x_train, y: y_train})
-print("W: %s b: %s loss: %s" % (curr_W, curr_b, curr_loss))
+"""
+W: [-0.9999969] b: [0.9999908] cost: 5.699973826267524e-11
+"""

lab-03-1-minimizing_cost_show_graph.py (+8 -9)
@@ -1,7 +1,6 @@
 # Lab 3 Minimizing Cost
 import tensorflow as tf
 import matplotlib.pyplot as plt
-tf.set_random_seed(777)  # for reproducibility
 
 X = [1, 2, 3]
 Y = [1, 2, 3]
@@ -14,18 +13,18 @@
 # cost/loss function
 cost = tf.reduce_mean(tf.square(hypothesis - Y))
 
-# Launch the graph in a session.
-sess = tf.Session()
-
 # Variables for plotting cost function
 W_history = []
 cost_history = []
 
-for i in range(-30, 50):
-    curr_W = i * 0.1
-    curr_cost = sess.run(cost, feed_dict={W: curr_W})
-    W_history.append(curr_W)
-    cost_history.append(curr_cost)
+# Launch the graph in a session.
+with tf.Session() as sess:
+    for i in range(-30, 50):
+        curr_W = i * 0.1
+        curr_cost = sess.run(cost, feed_dict={W: curr_W})
+
+        W_history.append(curr_W)
+        cost_history.append(curr_cost)
 
 # Show the cost function
 plt.plot(W_history, cost_history)

lab-03-2-minimizing_cost_gradient_update.py (+12 -9)
@@ -1,5 +1,6 @@
 # Lab 3 Minimizing Cost
 import tensorflow as tf
+
 tf.set_random_seed(777)  # for reproducibility
 
 x_data = [1, 2, 3]
@@ -8,7 +9,7 @@
 # Try to find values for W and b to compute y_data = W * x_data
 # We know that W should be 1
 # But let's use TensorFlow to figure it out
-W = tf.Variable(tf.random_normal([1]), name='weight')
+W = tf.Variable(tf.random_normal([1]), name="weight")
 
 X = tf.placeholder(tf.float32)
 Y = tf.placeholder(tf.float32)
@@ -26,15 +27,17 @@
 update = W.assign(descent)
 
 # Launch the graph in a session.
-sess = tf.Session()
-# Initializes global variables in the graph.
-sess.run(tf.global_variables_initializer())
+with tf.Session() as sess:
+    # Initializes global variables in the graph.
+    sess.run(tf.global_variables_initializer())
 
-for step in range(21):
-    _, cost_val, W_val = sess.run([update, cost, W], feed_dict={X: x_data, Y: y_data})
-    print(step, cost_val, W_val)
+    for step in range(21):
+        _, cost_val, W_val = sess.run(
+            [update, cost, W], feed_dict={X: x_data, Y: y_data}
+        )
+        print(step, cost_val, W_val)
 
-'''
+"""
 0 1.93919 [ 1.64462376]
 1 0.551591 [ 1.34379935]
 2 0.156897 [ 1.18335962]
@@ -56,4 +59,4 @@
 18 2.88878e-10 [ 1.00000787]
 19 8.02487e-11 [ 1.00000417]
 20 2.34053e-11 [ 1.00000226]
-'''
+"""
lab-03-3-minimizing_cost_tf_optimizer.py (+15 -17)
@@ -1,6 +1,5 @@
 # Lab 3 Minimizing Cost
 import tensorflow as tf
-tf.set_random_seed(777)  # for reproducibility
 
 # tf Graph Input
 X = [1, 2, 3]
@@ -15,28 +14,27 @@
 # cost/loss function
 cost = tf.reduce_mean(tf.square(hypothesis - Y))
 
-# Minimize: Gradient Descent Magic
-optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
-train = optimizer.minimize(cost)
+# Minimize: Gradient Descent Optimizer
+train = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(cost)
 
 # Launch the graph in a session.
-sess = tf.Session()
-# Initializes global variables in the graph.
-sess.run(tf.global_variables_initializer())
+with tf.Session() as sess:
+    # Initializes global variables in the graph.
+    sess.run(tf.global_variables_initializer())
 
-for step in range(100):
-    W_val, _ = sess.run([W, train])
-    print(step, W_val)
+    for step in range(101):
+        _, W_val = sess.run([train, W])
+        print(step, W_val)
 
-'''
+"""
 0 5.0
-1 1.26667
-2 1.01778
-3 1.00119
-4 1.00008
+1 1.2666664
+2 1.0177778
+3 1.0011852
+4 1.000079
 ...
-96 1.0
 97 1.0
 98 1.0
 99 1.0
-'''
+100 1.0
+"""
