|
10 | 10 |
|
# Sanity-check the zoo dataset loaded above: 101 samples with 16 features
# each, and one integer class label per sample.
print(x_data.shape, y_data.shape)

'''
(101, 16) (101, 1)
'''
nb_classes = 7  # 0 ~ 6

# Graph inputs: 16 features per sample; label is a single integer per row.
X = tf.placeholder(tf.float32, [None, 16])
Y = tf.placeholder(tf.int32, [None, 1])  # 0 ~ 6

# tf.one_hot inserts a new axis, so the (?, 1) labels become (?, 1, 7);
# the reshape below flattens that back to the (?, 7) shape the
# cross-entropy op expects — the printed tensors below show both shapes.
Y_one_hot = tf.one_hot(Y, nb_classes)  # one hot
print("one_hot:", Y_one_hot)
Y_one_hot = tf.reshape(Y_one_hot, [-1, nb_classes])
print("reshape one_hot:", Y_one_hot)

'''
one_hot: Tensor("one_hot:0", shape=(?, 1, 7), dtype=float32)
reshape one_hot: Tensor("Reshape:0", shape=(?, 7), dtype=float32)
'''
21 | 31 |
|
# Trainable parameters of a single softmax layer: 16 inputs -> 7 classes,
# initialized from a normal distribution.
W = tf.Variable(tf.random_normal([16, nb_classes]), name='weight')
b = tf.Variable(tf.random_normal([nb_classes]), name='bias')
|
|
# Class probabilities over the 7 classes; `logits` is defined just above
# (not visible in this view — presumably tf.matmul(X, W) + b; confirm).
hypothesis = tf.nn.softmax(logits)
|
29 | 39 |
|
# Cross entropy cost/loss.
# Pass Y_one_hot directly: the previous `[Y_one_hot]` list wrapper prepended
# a spurious axis, giving labels of shape (1, ?, 7) against logits of shape
# (?, 7). tf.stop_gradient keeps gradients from flowing into the labels, as
# softmax_cross_entropy_with_logits_v2 backpropagates into both arguments
# by default.
cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(
        logits=logits, labels=tf.stop_gradient(Y_one_hot)))

# Plain gradient descent on the mean cross-entropy.
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(cost)
|
35 | 44 |
|
# Predicted class = argmax over the 7 softmax outputs; accuracy is the
# fraction of rows where the prediction matches the one-hot label's argmax.
prediction = tf.argmax(hypothesis, 1)
correct_prediction = tf.equal(prediction, tf.argmax(Y_one_hot, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
|
| 48 | + |
# Launch graph
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    # Fetch the training op together with cost and accuracy so each
    # iteration needs only a single sess.run call.
    train_fetches = [optimizer, cost, accuracy]
    feed = {X: x_data, Y: y_data}

    for step in range(2001):
        _, loss_now, acc_now = sess.run(train_fetches, feed_dict=feed)

        # Report progress every 100 steps.
        if step % 100 == 0:
            print("Step: {:5}\tCost: {:.3f}\tAcc: {:.2%}".format(step, loss_now, acc_now))

    # Let's see if we can predict
    pred = sess.run(prediction, feed_dict={X: x_data})
|
|
58 | 66 | Step: 0 Loss: 5.106 Acc: 37.62%
|
59 | 67 | Step: 100 Loss: 0.800 Acc: 79.21%
|
60 | 68 | Step: 200 Loss: 0.486 Acc: 88.12%
|
61 |
| -Step: 300 Loss: 0.349 Acc: 90.10% |
62 |
| -Step: 400 Loss: 0.272 Acc: 94.06% |
63 |
| -Step: 500 Loss: 0.222 Acc: 95.05% |
64 |
| -Step: 600 Loss: 0.187 Acc: 97.03% |
65 |
| -Step: 700 Loss: 0.161 Acc: 97.03% |
66 |
| -Step: 800 Loss: 0.140 Acc: 97.03% |
67 |
| -Step: 900 Loss: 0.124 Acc: 97.03% |
68 |
| -Step: 1000 Loss: 0.111 Acc: 97.03% |
69 |
| -Step: 1100 Loss: 0.101 Acc: 99.01% |
70 |
| -Step: 1200 Loss: 0.092 Acc: 100.00% |
71 |
| -Step: 1300 Loss: 0.084 Acc: 100.00% |
72 | 69 | ...
|
| 70 | +Step: 1800 Loss: 0.060 Acc: 100.00% |
| 71 | +Step: 1900 Loss: 0.057 Acc: 100.00% |
| 72 | +Step: 2000 Loss: 0.054 Acc: 100.00% |
73 | 73 | [True] Prediction: 0 True Y: 0
|
74 | 74 | [True] Prediction: 0 True Y: 0
|
75 | 75 | [True] Prediction: 3 True Y: 3
|
| 76 | +... |
76 | 77 | [True] Prediction: 0 True Y: 0
|
77 |
| -[True] Prediction: 0 True Y: 0 |
78 |
| -[True] Prediction: 0 True Y: 0 |
79 |
| -[True] Prediction: 0 True Y: 0 |
80 |
| -[True] Prediction: 3 True Y: 3 |
81 |
| -[True] Prediction: 3 True Y: 3 |
82 |
| -[True] Prediction: 0 True Y: 0 |
| 78 | +[True] Prediction: 6 True Y: 6 |
| 79 | +[True] Prediction: 1 True Y: 1 |
83 | 80 | '''
|
0 commit comments