
Commit 796da36

qoocrabkkweon authored and committed
Update lab-06-2-softmax_zoo_classifier.py (hunkim#231)
* Update lab-06-2-softmax_zoo_classifier.py
  1. Some line changes for readability.
  2. Add more detailed output.
* Update lab-06-2-softmax_zoo_classifier.py
* Update lab-06-2-softmax_zoo_classifier.py
1 parent 690b03c commit 796da36

File tree

1 file changed: +25 -28 lines changed

lab-06-2-softmax_zoo_classifier.py (+25 -28)
@@ -10,14 +10,24 @@
 
 print(x_data.shape, y_data.shape)
 
+'''
+(101, 16) (101, 1)
+'''
+
 nb_classes = 7  # 0 ~ 6
 
 X = tf.placeholder(tf.float32, [None, 16])
 Y = tf.placeholder(tf.int32, [None, 1])  # 0 ~ 6
+
 Y_one_hot = tf.one_hot(Y, nb_classes)  # one hot
-print("one_hot", Y_one_hot)
+print("one_hot:", Y_one_hot)
 Y_one_hot = tf.reshape(Y_one_hot, [-1, nb_classes])
-print("reshape", Y_one_hot)
+print("reshape one_hot:", Y_one_hot)
+
+'''
+one_hot: Tensor("one_hot:0", shape=(?, 1, 7), dtype=float32)
+reshape one_hot: Tensor("Reshape:0", shape=(?, 7), dtype=float32)
+'''
 
 W = tf.Variable(tf.random_normal([16, nb_classes]), name='weight')
 b = tf.Variable(tf.random_normal([nb_classes]), name='bias')
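
Note on the shape printout added above: tf.one_hot inserts a new depth axis, so the (?, 1) integer labels come out as (?, 1, 7), and the tf.reshape that follows flattens them back to the (?, 7) shape the loss expects. A minimal standalone sketch of that behavior, assuming TensorFlow 1.x (the names here are illustrative, not from the file):

import numpy as np
import tensorflow as tf

labels = tf.placeholder(tf.int32, [None, 1])
one_hot = tf.one_hot(labels, 7)        # shape (?, 1, 7) -- extra axis
flat = tf.reshape(one_hot, [-1, 7])    # shape (?, 7)

with tf.Session() as sess:
    print(sess.run(flat, feed_dict={labels: np.array([[3]])}))
    # [[0. 0. 0. 1. 0. 0. 0.]]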
@@ -28,25 +38,23 @@
 hypothesis = tf.nn.softmax(logits)
 
 # Cross entropy cost/loss
-cost_i = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits,
-                                                    labels=tf.stop_gradient([Y_one_hot]))
-cost = tf.reduce_mean(cost_i)
+cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits,
+                                                                 labels=tf.stop_gradient([Y_one_hot])))
 optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(cost)
 
 prediction = tf.argmax(hypothesis, 1)
 correct_prediction = tf.equal(prediction, tf.argmax(Y_one_hot, 1))
 accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
+
 # Launch graph
 with tf.Session() as sess:
     sess.run(tf.global_variables_initializer())
 
-    for step in range(2000):
-        sess.run(optimizer, feed_dict={X: x_data, Y: y_data})
+    for step in range(2001):
+        _, cost_val, acc_val = sess.run([optimizer, cost, accuracy], feed_dict={X: x_data, Y: y_data})
+
         if step % 100 == 0:
-            loss, acc = sess.run([cost, accuracy], feed_dict={
-                X: x_data, Y: y_data})
-            print("Step: {:5}\tLoss: {:.3f}\tAcc: {:.2%}".format(
-                step, loss, acc))
+            print("Step: {:5}\tCost: {:.3f}\tAcc: {:.2%}".format(step, cost_val, acc_val))
 
     # Let's see if we can predict
     pred = sess.run(prediction, feed_dict={X: x_data})
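
The folded-in cost line keeps tf.stop_gradient around the labels: unlike the v1 op, softmax_cross_entropy_with_logits_v2 also backpropagates into its labels, and stop_gradient disables that for these constant targets. As an aside (not part of this commit), the sparse variant of the loss takes the integer labels directly, which would make the one_hot/reshape/stop_gradient sequence unnecessary; a sketch reusing Y and logits from the file:

# Alternative sketch, not from this commit: the sparse loss consumes
# integer class indices, so no one-hot encoding or stop_gradient is needed.
Y_flat = tf.reshape(Y, [-1])  # (?, 1) -> (?,)
cost = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=Y_flat,
                                                   logits=logits))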
@@ -58,26 +66,15 @@
 Step:     0 Loss: 5.106 Acc: 37.62%
 Step:   100 Loss: 0.800 Acc: 79.21%
 Step:   200 Loss: 0.486 Acc: 88.12%
-Step:   300 Loss: 0.349 Acc: 90.10%
-Step:   400 Loss: 0.272 Acc: 94.06%
-Step:   500 Loss: 0.222 Acc: 95.05%
-Step:   600 Loss: 0.187 Acc: 97.03%
-Step:   700 Loss: 0.161 Acc: 97.03%
-Step:   800 Loss: 0.140 Acc: 97.03%
-Step:   900 Loss: 0.124 Acc: 97.03%
-Step:  1000 Loss: 0.111 Acc: 97.03%
-Step:  1100 Loss: 0.101 Acc: 99.01%
-Step:  1200 Loss: 0.092 Acc: 100.00%
-Step:  1300 Loss: 0.084 Acc: 100.00%
 ...
+Step:  1800 Loss: 0.060 Acc: 100.00%
+Step:  1900 Loss: 0.057 Acc: 100.00%
+Step:  2000 Loss: 0.054 Acc: 100.00%
 [True] Prediction: 0 True Y: 0
 [True] Prediction: 0 True Y: 0
 [True] Prediction: 3 True Y: 3
+...
 [True] Prediction: 0 True Y: 0
-[True] Prediction: 0 True Y: 0
-[True] Prediction: 0 True Y: 0
-[True] Prediction: 0 True Y: 0
-[True] Prediction: 3 True Y: 3
-[True] Prediction: 3 True Y: 3
-[True] Prediction: 0 True Y: 0
+[True] Prediction: 6 True Y: 6
+[True] Prediction: 1 True Y: 1
 '''
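
The "[True] Prediction: ..." lines in the expected output come from a print loop that sits just outside this diff's context; a hypothetical sketch of such a loop (pred and y_data as defined in the file, loop body assumed, not shown in the diff):

# Hypothetical reconstruction -- the actual loop is outside the hunks above.
for p, y in zip(pred, y_data.flatten()):
    print("[{}] Prediction: {} True Y: {}".format(p == int(y), p, int(y)))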
